[Binary artifact: tar archive of Zuul CI job output, mis-rendered as text. Recoverable file listing:
  var/home/core/zuul-output/                      directory, mode 0755, owner core:core
  var/home/core/zuul-output/logs/                 directory, mode 0755, owner core:core
  var/home/core/zuul-output/logs/kubelet.log.gz   gzip-compressed kubelet log, mode 0644, owner core:core
The compressed log body is not recoverable as text.]
yΩ.͛@+ lSi($^2XobA%B&LR#1sTK-rxpqZ6uH{fɮ3qŵ33I2:3`a ЇRH!^(`>p`0  B9p߽pȁy5jܪr?d k7q 'NF55`٤&Ng8I>5qR>5XxY{~T1P\)fZrD& vȨ, Kee>oD%aR/*ݒdbS,ёz)#"bS]+CʘtMll>ƕ~ FÁ]$`=0|)` " XbUX(eZmFYT-},9GFÇΦ|RS`l"ݽU-wӤΖ '4E !k+)OXE f@WQ˔''3CO'!TT#8jy 8近g(]D)^y O Ӽ?]4oO_멒^譎E.łc愰# 1\zK!BШ{Ϧˍ^;:mG&Er[vY@hBfK9@<)pvpD53k%ɀ-&"g,R f <"&D9kH]_կ.b%ӻz58#,Hlxk?ON F=~Sew]9-]\qۖ^MOVF0῰.ĸbvӷ?$63&7Jm-޼\&a Wh/>9ݜWDDvEt'.,N|ˢKG Ɠ ctE=Wk739cU>>}Z2r p~|׏d3)eR|Λa/o W27[\fɭ݌@);85*MS 6❚fQOkaO^om懴>M\̚"^7[o(~!.,җsP#n{fNjD˕֛{f>ߓb2xߐɻg{Irv Pj 픷1!鴞&Ϊvbj͍ڝU;ywW96]7k8J[yQm9 **|$JFH^KR) {5a6 ,]`??L^<ž9Z&(( rXJ$H LD<7JSX1 @,f%X>ad57{} nJ)1D }\䯂h Xc,ʶ8M8ȥ=͐j RCK,5TQ,>j\ q@żACd 鍔΀zIsi,Ti,4b"!:g2>м 4?$t1Ł,J1߹Κpm6P{6}VjǽQ蒆IƈaIHRD`FB(,p*Wj vh5!@QAsg@R*:B6q6k<3x<_瞭~_9'ۦ3veDOX>nR O|r$V OH)-% ԗ֚PKe'ˑ##$K|B{$QsUE0`i58Hl@V V>&Ζc3v9-PrnY:_UkEZtIDhWb_ku~Yj]֭3n96jV9h۞WZ=簙[J_SϦ 7rhώ+O MS}}juosΆ$Ea*˭͟6 6.Y6w/!=EDI_*uɀVbάu3ݶ>-[*۲glu&YN6 NXC6R{1q;"j40*2i#B I% 2(B  E '8&1qJWdI=ii97kxp:zUR(<~nh#ޤ2`B;@]@gys#ap)Sp)Fe)A@)G]248[nLݦ ) הJNl '!gT D)uIegH 6% 7_pl!8ÍV8eĪ)#o",X7*aI^U^\]XUʵ^>0Iq&"i@2ES^H&p=}}tv_Jk6'\c};})ah/k5K-c1SN100q0%sH[]F1&=zz{{rیҽknvN]JonZe]M4ք IyUB($<Mv)~ĜE NV)1'o^)+4vs{vee48< bҢҚPEa+ko.]~7ލƗ0$].;g^c/ h*zRҺdl]{Mz^71Ã<-~~-3ؘK_n|eF.=z"J (լlU~V0#*)dFR˔.DcQ@d|e,k[Giד5NZ]l{i-:B XĞD>'<=<THP\%~6pĕUVC+!՗WXQ|]WOwj~/oEQBzڻjU^ (i/`̕Çɋ^\f0INQ5gy;nc>G`<Vwb< v{ݟ`Rhg0ٸSB-3,=Hy1$?və`g>lo~1Z6ȓ`IllJn;X-^Ef 2Th Bxb­DKuQ E٫楁 Nk)0,E@2\\ E[)G^4 4ʌ) n|L.@PU3"q0մg/ުgĻ|sJNOϥӼ6vPWbgg{ޱ=,p6dCAF !;P3RX̄k fМ+JaDƜUru%$+3X>&'ǵA G*:W8XǓx7E.?{g;7pXnY>>~L>J9C hL"6޻,>j_じ=v((O9F T --eZ,}Z#ES87dw>;uJ.%%3f6 rDؘhu"G2BT7P=ު'slGM딶Abtk?nCςZ8.FRoGk˛Lg)ݩіY|Q|޿լ-ӯg{WrkզEU i\Bۋ.~03 `tCO/O7ۋMw|f;^HO6d3{zxws=J^{r|(osyOڼϜ'kϹkޢ/Wb1Qgݥ w֡k̭rgH޹޵,e.mQE̍^)M@i[!{;Kw > }]vǻs4X.r|/ g B{>bܒ7ې%xJdRТa Ըɻ0lu~L{7Kgp+2vtgp\0oB_ŹZS>[s)H/{1H*SqMA$W2 nxw1 !m{/˭oohoé~m읲Sĕc n Y-被ic6|-WL y`2uwoxd4l;L2J2S9(?zRHalF8@ݠ'KPGH>SyT{6z2MwzhH,12'LB1R_o<Қ֬C:ݽe|jð*]ӦUId  dFG )(<z%$]),wZmwp,T UjR%,Y]s%-hcNEYAŽP]]I:O9?a>5R`%$-p)>&Eks J=sYM>yj's΄ˀpΠqG|vV,0S^K!@cU ߅Ig;HI'A1{"J+AG.Ed;A C0 7bU[B CB;bjdl.gkZAp/~띋Ѵܶ4|T.V'ec{Zb'7WqcR UZY&- \nM7(+\.ەr_8Y f}7]ۻ@.A{6P׊-kK\)Θ$Ŗ%at@\g4heSND{<#lwgBUj٬IxM{ڮClijAx'~oց( _V}_]aM_gލ;L oM7]nz l~ ꍯY("i4nҋ5GÛEGOWXmkoFnz놶"WV3e?dZ v?/.}Ukˍj(;todOD[c441IQ%诲+iw5VR\Lw-bblWmΏU~w]|BUY?xI@J{1@Ju5{)@Jm>L L a&sdzͧɗ7%}c2 i(2ȚKV\e?ol&h.)$"@Y`Ѩm^f4FWOJ+|pgnVG6q2>-Gp^;29F*|S_yWbJWV}2^2eJK6$;'vi7?rNv J1tw~GW퀔a9r4X BDmLXK؍rֶ9R2R89d0:e%=Ӥު&7@9!+]ck226lJ(Q!~x>LL6Ytn=tCOn& 9.{@(lR#M *%,7 N`IVIk''q/ cJ0Yxp,ky()¼@V^kMd ].Gź "O Ӵz&S!f̛\ttG&,&NPT.1DC7.!W If#{w19<$H4[4=q<ݫ'/ު{LݜX=DOj9\t(cbPtCt)bT\ ǹ,Ugef {g[-<-){v]G=Noa6n ??=v 5Q <*l "#2D R@l"!'0cS= lMF>xQ:cƂV2 YWuv[l?5y*Vq*V[W]V>͎qÕ-t҈B m3s&""I Ri!2H,:Ă&(1cQ@s=ު~ZT ؾc-"jE6y[TKZyRy U `XRh.\pƔFI6+E}0TR\ $4Ns ůoG|DC.N:fc.j]:E֞Xf536)&8bq%C¢:6g&3DZCœ}͎GC>᲼0a 7YlȽ?5);P(Z?=sy?JPvG[n|ڀKXni UM~؁3pkf ?8`M @)ҿwԎ֯_ MC60gWuu3;imzz Pݬ3~P xƗ"R[_jimՍukfn*=b_sC+ý?>}9gѲR*dhj0Yx@KȀqh-Z;֋ܜyL\G1_ w 櫡EY eSGq5f^RM aR$s5 Y%ά4\g=~m6$C)DY.;7>&EԖ`lHD3ef{n:{FO a7tJB7 KY.>x5[<]EIcTwmmr&U0٪ݩIlN WGYҊr29 )E)rIJ n4E%?'8",;QD[dT$ESxN׬4:D8~k6~LA÷",ApB$Lj#"d84͖qkaeS@A-Nez(3ޞ$f 愧)c{}PJ%nNLB<'G 46BV2 |wb$JHU8#ġ…p W1c 0_4wY 0mz>)DeG'O ϳ8\fT;Jul8eg*KTT"o{|- D򒀚W ٹi0{?xEd4ttËbP6- y6pQ 8Q]U`k=`R{0x5@Qéw83f}JZQ5~qNY#bj`ʺlˈY|46SKc?Һ^=P=V/TOƴ . 
U&: Ro_ VPݪl㐶zS^w2NͯoTo58VoXpw{ce2Z,?~['SΖ޽y;7*ؗo~K?W_2'n2Z>WO_ro?f{1|[(7L`280봯xE]"t^7F0אB+8{u9Wiu5ge?4{)Ws0z͢țp9WOb.7HH+۠&}eד +S_˲J`6 ¾ӲwϿGρ7fvG_jh⫿iYGiU=&?Dv_gi5?cY`9P_/M&"5$:EJTmj<}}YuEMT\6';#5Iz*dVoHa)ȗwYjׂeqwIPW8 =cf+=n:PǧUAZ_#7)t)sgna ?rܻ/.˽k"q)GJ u,*Q}*SoR\iЖl٘`_n;r06a x qάQ6;BV5e1,p)8 y5r,IoTBYrҒik(Nj_ݘr:ơ!V0" (W&emW?R{94xe]ƣeAIZa(i N'I r8GrTTeFev9 )RT;_EaQ<T5v xl`\ƴٚe;a~g7_V-Gy#0"̳A`C癟 ?DJa1ٶNqH/_6RUj "F~hq'r)FD%kB;b=錅ܮ6bߦ(FCLrJ)l%F أ|(#ɼ%tKkX]ژ-tQ1:CWxR@4f=;)np09sH[G‰uXTެxm>@ʥ$ӕ%W2:Ec~vMuo}zucZ1@LC).3㿛iĦwY?B {ŞsCnoB&diuy/G@R K)Ϳ97X~^n,.- |K_KU@J {;zp2 l~[oA쳣SGWO(;w^;.c fT?) *:gQt6jk*y)UrOa{ՈRrop[l}g(K:d7.QajC쌜)Q!*G[&ݲ~g,T,߇]d.U c)620D}1} wor[,K#%7?p GOqc?QKcw'*9:9A>֜6/4^r=(Qs-9˝R 3ά~5جNJFy.^u*@s1xWPtyUٻ6r$r32"`.p;{عOGYR,ىs-Y~aeIb)v]cUu=OO9t5f%3YOH#+Yܺ۶׃wO&D1P-nw\y'Kc7v˵ZVݼΚ/m^&q_yJ]nOa7Pk[o?m.F+^W'>/ݣdXgmWF CP@zfmlY#fΣm::C&8vFTJ(Xrƭq%6: "Zx E$z,S2m@%Q.E~90(}t< Wk{D.JFV!4/=\ZM Qm5"'"f"ٰ}q@SN^xp '#9S C2Bi]J(ASI8I;HPV2裶 X z<p\fڑ!sRz.I2 1s/cG$c%2s7;} ݁Dɚs\l&`FU6U坴U $#j舳}0!\MP'Y|7)zp@JZu5l8XJi$[ٱ%q~Tgn⹜:,7fmI\5d!F)cRFXNJBK iϵ R Rdd.|pn/쳦FF_\n\VŔ='}y 3VGF_{\5\Յ`SK)\] W* WKKVerr/U ah*HNV^ W!gBoɪ{Fohn L2 13+AQG3['Kǒf dq0smd&kh֡-V|+k^KzSszbwQ <!3`x2  PB >ɠ{Dx[cdW>|ڏ'>Y6CD@ 80Q3Ԥl-!"R])Ke| l!O,pRQ!xk'4j&s:bg .︶ϫR|ji>ﻋo} ujU>zTL ~y\|([*<]TL,2 #g>׏QGS쑴|F:#XMOO6o',12o(5sm@FdYK.\ɑ $$@R󞀸czNӤ_syuV(P1VREJ TӇd5.-"O3'r6=? ǖs&L&3`IBVQ,CB)!b̂3 R]v] %m^3$iAYJ*#Av - Yx#iѓS(u_OΝnIX,e\kVeXvSyq&UK8W8y&>iӞ_&ړG{qCqys-S\}$ާ:qq6k*iWOY|s4e!R5 ;6>@A(˫/_!SdtՌb4KGhvp1)ny9McVr:O܃rk#}Tg>#E3yu׵R2]<>X|{?ۼĜfU db8cSRrn>Og~|5/9k $9 %{:0mrxE2 "iǷϩԮ:F˿Y-\ Ӳ[tkY0 ,jY踮2.֦)#Iʤ?k[sO/ެ+wA _s>&y<:}쫴"6bQ wnqmTOwMozm˜&<,Xd|=GgŶ)mraQkNB%0[+)rIhT+y3=v͵xELQ9 0Wγ&}4vEk{B>6%T^YlU RѾ *jfs=H܃Goh#it[f6AM KBJS[K1c2i7| =FK|1- 1z41I OOTJ;z5V(/:y 0OƬ ÔmʵWvw >p~d/yu^Mx8ۚuG)8 b坝M'&OtRҡ=9j!Xfm nDeuɈl_îCf@sHt#Qa_T>k_yo6t- e%w,몴}S&g/EMɟXHVeM2{^if4Qu=['ӝ'Z8THOg8NeQ̚k(o {=; yuoj6,ɬ1ׄԂb:_^ˏj.z ZϷW XN$̜'rZ8!rGmef>J&6%(/_N˗5)tby/MG/axİ,PMEcIuL1GlCRH][LNx-Jʂ^E5/ł95jho=>dg|H'y`W_V3l`mNJT(VˠC^ږggVԔ;Xg&C?2c)gN8[,m,P#8w$pR프^3gI1+Dף%ɂ@%&#Ar o ZH5WEHLTxPz!R$iža'- '>GΆHIZ !M.ɇw}ji2Xa&%! L6L3@(В F&=O73=_=ly61VZ_:+2w4'H N"UI<#$k郬um葧) /{}q^ g)z[ͤ]9$BD!3?-G#T-6GCQv knwH?Ȏ.@p;tOa܂Cn .WVhςx@NA RAĢ)a0*Vw1 7d"8ыJB ܍L葙R ni"9/m?YS?S([ c|z6xojQ,IS)?OHU/]ե vB5Sy)EoTWL9>1҆t< WktGaK2^gV*y2{ҼFpԄ?nh`S[Z4` Ƕ:iwnۖ 2ZGSJewm )~v󺉱ެ;OY0E2$e[1oުw;ow$lQ=V/in\4F"`"ΖI:~O"RUU ]FȪzW|UZߥ ZJHz58f![h-5(W0gO_𹂃E|,MqS =͏V),꧟eHf`2p<48U)[0x@O]v :jY4utlSHFUr';xds?6>ð6ƱZʄ2a^!X&HSE57V"FMsFV;|xLȇ󑏠7[649?;A<. !Z7aN)jI](g>ׂ%7i(1,cI"kaLwf3fVН̑ҏ.CY,Oi^h.RTH"`9cZ(-PecB)7jEέRȰ, Ƹ#aREbD hl{5q6Y ^Ik?)8 mkl%Y\ |ρFo "L|cϝS x`4ʜQr̍ 8w`txgO 0ݡdi *l"|ՎXb101s&r8(8 u4D{R{,z̝vX ʽ):a&#C MՖ8ې1!@\ k$>1;<4nc<1GHySz ëEpL0y4QI\"KpTcJRrG}QBrFniAx7}r0f߼z/ߞUѪg!L{)c.>]tDB. *S^>h_X> {Ot,y$"kYl2b=6@!5[qgkli9VOjOF7Er,X%1d o6.~unERiSUN u&\-V 'EUꔥM1ʂkE+A'I@h3y}ʀb_˔TWR,bl[I_6mu{Y6O57J[]_}ws1A?U*lh_=O|h֟_'Zommwݻ^g?>jZnm~ ۜ+#:ky$.'<`Vy$y<] I+嘾SD2Z_(a9Z83|q8*G[rV;w ȏ%U1R{1q;"j Jh>FHm{(jo$fT/@*e;Q!(@6hNxM'ݖ8}U̥ޘ`SH%1IrK=tByhզuu{;WW1ӭi)eZ(c0|q m̄!KB = Q2%FLNLj9+j$h{i^VIs/ZT{v Wsj&+|[seK\Ť RM* 0rmC$% Cb΂wDðqmoMJs}yP]"Fh/{tIγ~rj&EdQa`ξ T(M>!oϒgsr۵S)|\;Kc|˒mH)ʓ )"Ctz!S3+ۻ9O`ѹ: =WJ`hrtb aޓIe]@\޵6m+ٿҗM5%rm%ƵyUͺ\xs=Ge6@r!F]H"ANNZFx5ƃP,I.|MLf`d- $y[4i-vH9B̻5 `^~5ނfa+iJu fcMs21#958ǕŸ.-F~>eƌ; 5odM^CֿɵY>y>LXx4hZsp:m'kJ淶\5}eXHl^)}U|ốaPէO' K^"O+ jf$o̾}yZETkȭץVɼ4$Plaib̯ٙzk?GL#iE! -ja{-G52 &G\I}[-,|{a?{vMo Qܔj O R*PbѤ4̐$rjot Pu db0E;ti5;ךoU6[_GEQ;ViU0~Q!G9jy>v"g4`[*$TɀS ߂?|]X*%%"POHJ4e8 ^ o)Z)4Hdn'~yIjзuʜ<7W:/ ޷oE~|rǰ +4wYXHYF3fRd΄t9˹kguWNao+^MoW"h퇰WXI+mo<}#V7~TVɆ= ٻL)6 KFt(8[\L{+R=OuL. 
ۣ +l/NYG|m$zf^)QBٜ:FDkAOVO/j&gJ~1~͑FRY2JUɢ CܙNMSӞ:5)l"頪ՎB*0F&pnĥ nT:=V9 %{-0SƽuH ÌМSΚÂU_+<: Kty~ކ:1kuӿmYu~Qf"Ւ>_%rqW=aF&IpULފ+BBɕs%X &e@Ew^Ş'>{s@(d>|P15GN@ C12cHb W AHRVaR|LwZ $H2b=61 i蘽v֜6B>O'ۯK{xHmbHIP-%Ȫ!Io<)U,M0<1X34ncH&)'Q;7 qfAI$I&bXXD>uWٮdWii#3M'UY\qxq8i0vr5 Z/};' OdVcْGH8H4H4#d1XJS.IL"0j3" PSdJFg&#'&}Ԗx"z5t쑸lGJgXؙd eX(z,S,(mv#c{dra~_g6^ؠ g h AUJ$9 }C,nNhd0 3 j-Lؤ9NKLI1#fZD"rRu=Us#/ݙt j{l `FB@2H(e$15US[.7|`۾'Gݶ'ůYu4OüTdvϓvUdfɿ׳˷_-G8 o)Fտ`xŰBS~UZjM/gSNzۼJ  1ctac^G{oFИ?,|yQȵ~6\G6?1Z_dRF>lP5aR$D%m0ڻ_>kP]!eFNn)#Z(L15Y<$=Å>"Zciz4ɋ,ҶA뼚> o3LUېoLrSoTα|Vb0nUf~p%z)ڛ{Y@#V]`ASl1T9y:Mi%}`\.q7~Ci>C䛟 mAfrDdž5+KFՖ:W]O[L zy)xMjN  PSjxo9A7 ݔ1!iUءvyaڲҺpjD BMk8XnPm=> **|$JFH^^KR) {5a6N]=s0_iG^x gt8 K<3\x=b)2/#05(NNJAvLP=uzI]<)<+xۻ{UaoqPJ6 x)5i0_ׇb$Uq{pi4OA]AگT/" E9joikln\q|{mZmcܮx10?͛(,Хgy^i(}Sf_7P%>ƥx9M0_i7[ IXV'xKM -@,A,?=?Ɓ#_3W]=5",Lt,>uf׶aZoRQ߉ ~\ gD:9+He `4}Hh@@{q*2,'Ju>1׈\ &1a=]] M%ɉf̐% NW ~dU,E6tl RsBtOWGHWJkJ]%"g0U̐%ݟG1]Е6̭ s+ $UB}g0:F2LV*Lp5ͅt~f0doMOAs_uxtġ%TkU% ̐|%" Xoy.VNoXB+x \%s֘UZFб^zz#҆5SVTL耲H֌Iؾezs"4t|Vp atBii:#i\ F ~K7Q,euPV~ʨSWb 4},6KL$UX3dE-k/k]Ms| Λ갶BPB_LK4.mk-+" N|aԚY +o@"/%" wBD"*76#] jȡtjT*lxJ2 ψ|U+!Jhu Q6Ӗ{:RLP])&' |NWiL=]!]iNȌ Е攘l Ѳϯ#J1ҕANv ]%GWp ]!ZEEJ~=]#]i4LΆ!8 kO JA{f|HYzT{>8ZBAz8U*F&N7@5WTJdé?jygS顓e^PeERRMO2dDW QОe;NW eOWGIWLSrRWX ]%BWVSuJ(yOWGIWꯕmr{堼fN'f 7C@Z%Bf9EAT6tp\ JyB nz:&m] #bk_tp&Քt̘!B *e:Jh:]% z:B^8U|Z$Jhy ʮ-Mr]!`MI27D`Vh_0zf vlzJ;jؘCexVhhML+k3Ђ6=]Q* ͇\NsVUBٵzzbT3-3+Cl*=VhB*佺:J2LH51U#;\@ 5L2.`뚘%2''S t+4dci\Yzb~JFg0_)Xcc5MvsO:qm*]؋q a~rע&dqS]gb?~-,>ͦ+c_~p` @ , 1Lwg]MI­̅Ĕdnm&çz?M?~_g7429uYu88S,NƿoDfD3U#XnUPal,ɪN߭lsUPhvg~Sltr8CCN(+Je%~pRrr|)K{Ppu2+/+;EřEhD+L(EmijsP;mJpV[8AQI畡6Fm 'Ejfmm6|q6 buiQs 'dm?i}W/Sn.I^\oX4t'<۔swMHeC zbS[obqFkzO=]LfN4˭%:"OW>Dr8ӂ`s Qy$Lzf@A})%ObԽGMo54ґb0nD60E+Hs҉'#}d##UZ=mb=k{A L)GXs<#ଔ—z()QD4 n$^*ܲ64 %U>Ay:}E"nX,Ɵ79~WgbYi^ X|P{}׏*iH:PUO}7S?@d5RS4Aj]ʰLv,~ݚaS *&頊 UmU/tȦDw9:;\NjSٗ*bdG[=ws)Rg1X~hgIʫ*\1wᡳxzX"`Heic6D9a 2$甗CXs7.8ג ؓci%!@ *aTVp5qqM6L<ݼYF]Gv]$q׿5Mh4zĪE'Y[*~`)7՚.q\>:b-[+칷곺{P>p?.g B>)0x'Og{6Qhfӿ\.NN7/v![D'xIö?4 g>Lgxo0ŤfE/n9>/`[O]WrS҆7Ty?kbHrq(gxbgpfM9d"f 3Śv1nj"k~=*oSa+QM_UTj%D9$'{O#ǁkQ98ؔ LX/;]΢=w1 ҟ?m C f_ &AuɃ 33vHx4wadgl5ՠ{Vs7\ E5z8e2pXZ3J aK^Ktȃ"iD+^eUX,"A$d jR ΤFtc/9ڒY& 2 9q>VijJ?(RHsjZr!.=>2rqrո ,/|V]a(Vu:׌8.Wo͏ӏNj:Ւ%.x^p1EHUJ(R8jJxλλRo]Umf߼VO!ӭ̃*rkѷsݿI )ts)fB)1]ƨL{1/Q3{e^4)'âW%S3-spKFF+#h,)##Lz\!9I)tl(DHIQjœSSRi ]boC&vLSV)譕o"^}#E|YR%$&5ؓ/^n4#b jIl3ވxy6V#;|+U};[3+˟z65<̀9K` D9W2B,X՞Ȓhk#1[%rdWTIxI;^ۚ=~Y2ueKǵN{l|C{|scoп -U1@xä>()#knu/;SZa3Iq z}wIe3lu[j1{+H]0iՌho5d@r[ \mDp쭆R]qjIU 'ei y8C+a4`Z,q$O{l>U VS囥*Qy ZGJE88BB,eQKt;ПqL]D3wZG+^,2/HJ.bٻ6nW|j6ŋ^r'hO}W[Dzjn"ɎVeʖ Fr\r̓[r}ȦvՓ`=vGgbQ^סMW0GIe(UNF\8Sdqgz5AMԤA>T"V;f@ù 8x9ܨBA RNFN;`j h ȔqoR0#4'(xAcf(M-k2i\XjϭK3:;OBO:q;tE=@\MjALW &=QÆ:,N/Eʨ$Pp\b(Ib`AS.De>9|@15GNd lbe8Ϲfy [aW AHRVak>;cƌLI2b=6:6fg(&`\ Zqi?y) E\|YcRE ~1DLkC*0$5IXDo/ qfAI$I.bXXD[QYQZuX4>^ldG^)W]٩; |wN`LΪ}|xކ_>)ۡqhOeK&Ub[l#xM(9 `;ip)+QD07"tRE#nHΞQk 0)yI|;tLdLBDN3ld\CFǡXm[)h Є"j 1JI1F+XHJ␗0r!c2 Cv4H5> 'H !%@=r4&c2llS%x086"QF<qAT\'38y?.]7fz\0M.t`A L+'؟>k zQ?kg r=>P\)fZrD&"N oQY#h|#J{#{dqLeWMJ_V?FC>E}F-0%>^묽W}* 0a8",JF1,-2Z*)ZF3y`< JE؋EHc6=0XjHB-{AbD$@l2XFY -4^YxXm<ZK@lP*%Wd &1ٓ5/{P֛ТCg9CC( f`; /0IBW721;p3X+dNlk"-r"e`ì6!bb * )E\{Dz3~w3Kh=ɱ*l!-q(BĬaR_Ci{bvz6LA>G&?*U7vÍR&tr<"Lc u ]"(x9#!!&ŋ#kd03m‚h讗*Q#\tYJy|y~ȼ$pzU`-`R;WɯAݩ޵;`=n%-Zrf~TAaa\#@/IE\EDMu|w-ۑfd=ߵzMϫ_l knwU*9 Ro%Jktի]j}. yB24|j%^pɿ :?lvy8XN!Lo[éc_]ufӼWl&-xxqEcg_T\?mǶ\2ُ&طo˾wՓㅼG77-?dӿ? 
R /..g׿"iWٌ1}uo _y..G)}QOs1]֭ o Zy%iҥzr5ua!tW*1:"X#,WLg'%ǼڷߌoA7fz Dz8{>}aK6UVAk;\].]jen/?TSelđHԵe\RMBT] {2cq2O S]!W'Ua)cc)j$xy {> W#_ŀP4= UMmj|^9РE@`-[AF"*AUY^Qe]m[n@hv3yV;hUmp)q ;&77CZI&ߚ8K'n(~g|sFwzHԬT.X#QrTzsPl\nfL^m Yp{RtʠNy%-aci%vd׼]m^XUq籓wU~i]8 c!_u[.h*((3"z{/JHA&8eDTH컖`LQ"žs&(( rXb`H LD<7Jӱbi#|o4]<-y2xyq۪Cf6n9JisX㑈5(Z9C]9wge]P$v sM/G1L;k`BK 1}m~'`+ɾ*8[D~01n}Wc~LMVa|@-ACu@huT fK]wNJ&*j,M5Ϫ{VY ٔ"tE 'wL'{ mr5 |}{HK\د-t SE3" Uaw)t)Y5]g*Xg'5z&p$p!?E,* m`e)yh\_L'.O\?>?8b&.G-4DGTPJm6>iDcw o@?t)vx@h|ɶw 1Tsi]wZ}/ހ8."4ĥ4S]0depBJep+fGkK^zY<3DFʔ$SEN$zDp."*F̱V$DgN(Ե!u=({f$յ5mx}]Wݨ\jmlC6+dqobb4L@0FqD(X S4ʥȐ41E 62(,`#*$;A (Ut(w"ll8 d2]~?)gEZSi޳|R YV X?9e RH|&1rRz/m9s=sĞ)Ie\ AAq#)A|Rcj$ LV!D!!x0Xl2b=6@!5[̸38[zM/k̞6gUg4f޾|)MBL&'mXX;ZߞNt!Nojug5/ hk^*~<$篻͸b̃yPϦ 7bhϤKNŦ6-ylAFPԶ}w٨%q#(-j¢zQnjwAs|_(JX99vlFHmM7 @Jb Jmc}J! "DʔM%fga}l2 o.E'IZ#gvaկXӭy۱1BB1 GtCa}3aeRvǁ@E:R&z"S&1 9f2Rʃp-uGgu%8[.L}AISW)!"=ؤ~N02zQ1\R1&՞!۔&|i…5]dlGHb/vXE0[8S*%dG*5 DoTÒrgFQ^]?_XU5a{$84jBD[LA"rީcccYC׾>a m5Ar#-2D~B 0ɰ$qaH*~ %Gg xJZ]koɕ+@2[S[uXd / v % IɴlZ$ՒͶI$٧=z^_۝`ܷouZ꜉D5϶k8]]T;JwDUt8\4)S&i Ҷ($mRFh6L,uKҎܮPّkgvO?l щEە-7YflISfBaI} )qe:c~c`XUs!bqг~T{[ה'](md/(^fmf>&϶H*"CA~eUErV9DW?.L/ >4GN ~b9s>bq}~"ybA㤎^H9WyqHpQ:&{w>A ]bоktQqֲ,O)*G5[I|e%UtZ/Z·\63Ԝ?"XŘ`;Xr3mIH9ܬj07#lĈ,x pMwC骡$ʰ`ϣ+JUCiC+F2Xx\;֫Pj9C+f]0$kh誡%:]5VNtut園~LӠ\]]5lBW@UC)DWHW'^UGwM#_]- j7$׿NhSMtJ=51v]m\Ќ`'GCW c+~tP-YJym֦1 lM 3N:X5Jn\K93"`Inp M7V JM Me7qN{IeolsLEBjGmvJ&6Dc ShPz9EF]Q<jpɏZvC+&:@ `FCW  ]5z] 2X %4Bt6v4] K-F j+'Bg@%7jhyꪡ\9xá+/c`h%1jh_jvn(ꫡ+޲XH \}W}vCi6]w+jn9l"Fp/Y~78Xn&a^9-̖gva{_/m@x؞#ڗ4o9qhl֗XXr.ߞ~,7fhy2_I7O rg8Z9ÏS$Eeh$wgN I0[ +:Ҏғ]|6.mqVlj|z~Z]e~~zӌQڙPym3X:+6CA_g%sn7_s}힫i+G;FK7nofiO؞uc{F#\'"B(Tk(ij(Ք"4"`ox4t ])e0CRى<O'Ɯ(̖i!ʙ Ur44 ,Xhv4PZ5Ҵ&ٍ,]ʥ7s촒p~k[mǝ,v}p~!SsS Yz۳"cnF4ǒ$L9!(FcR`+hkh{jyLS@h<= c pTjDtUX hCrhOt,t崣Q`h pɡPAսALd8s7*;]3[^jbհj *ܤV͛7+h,^lY9Qe}{gPˋYe[BiէWΦk]hŇ?je 7!HWon`'ycqy er[p2k狒]HjN W!޼vZy[?\+5'+>wԹonwlvg/0vZܿWtzwǚ "p;P24+ؽGla`?{煫댓4-=~|8=GA9iM&0?p]\~2]uб?eI%(rե2iȬ;XdV&kuA$&|'/] gafe}.K)4˿Vjywz˻[A&!6{*Fnb}0ZevR &$M;B5^w9W1Dh}d*!i-B ecV)kU5.̒*lC'h;6U4JFW~!)gREA6W^ caB}[T"JP e QђwR(GO\J(pI_JF+K"=Wku&ZnDu{Mr[Nj-jLQi)SV@FG()S)Bzii{e [zBΌfJY kĬ&Y1Zs>JDqٶ0fv҉7YfAk2f(mʚFIiXʰ1V4F):SACëV5ʁآ$W•9!={ +# Id. ƌŠ y#X fU!(DGkE:I%GY=T"0eħP4 }%-OƮYAEE'  h-ж mgu٬8Q^ARXʃmnU9 ʢQQT 16| mW6h UKb\j֙IՌ9&G..00J`a~m"\c8#PV \rsEΡd(( \2J@PA , [L&k UR%B1% 0`& u, |DE1;"dUmYEL+\:X,* l+t_EPB]^ ߁QfBlTzCJȸ@#MAB7[@H e`5eeBE R_IJDq޹TUDb$FYPM|1%.[C&À:3([ f_e `I*- p-Y) TTg4B@Q"3#(#mk͒`"< e̯_Hd+"RV,+H]FkX5ݫ(I E,)VՈyE{*B_,vL% ҪOQV+a"QY +\aF Y+c+F6 βAڻZKo=f<;ծCLE.&jH ]{ #B xu)9Do6Aj5AJpm $}t @jT50IspA)^o* 9%pCCK6FmJ /BPwੀz`B#RT"/a wB)W Z056I*dO˅ YW*ϕ+E3So]u6ag +F,TmB%BJ҈Z0⓪@1ng lZs9.J3)%&}*@$S5%i\4̵=k$v \EԼƛZ@KrG^}06L zx A,gfC8 dFhC/j@[Dbi;|g(AcO^tQ,[J]1J eK| ." w ks :gC1IتXU\| RLYY 4EL 7!"!26]NS@$xiҢkT]!Tv"DoaLQ0@5aW^- imRqlXa! ]Xxtq)&3|@`P;c4ҹNltEl x㏿YFmhbt ) vYJ geSLZ>fGO:.~ƹL`@Z4uggZzXNO' h>mmsK+].lZZL]q]lI3>um>bt&c|cŨa+=*zhYyN=L_SwT!xx@WM &;:moU|#D< x@"D< x@"D< x@"D< x@"D< x@"D< FQPy@װA#:5!"D< x@"D< x@"D< x@"D< x@"D< x@"D< R섓x@M`w<A\Ա:I ^#P1x@"D< x@"D< x@"D< x@"D< x@"D< x@"^+H ̢c)?RHny@d3x@"D< x@"D< x@"D< x@"D< x@"D< x@"D<2{V6z˴vj* TJ/Giv9]|Ęg?fXڒ-A\niY?  [{Yj^Ƙgrun9W:R5)y]/>͇AwKH柧?LjUxߨ 8n_~ZTcutک|u]moGh{nBʐe@\e@Z䡣Ɯ2^#ʐ^/apzU ?f?൱Qw1YoV[w߽\ϮW>?|x9ty~^>̮ףf*Qd ^> $\T+[b(! 
cjcWXn 3OZ]v6s$jt9=_Z'{}iWoTCY?l.y/H|dXg&uexaiSdkh?rq?~j3=]~0^5y O]ɗ{~_VǦA2Feܙ6*客L迡SOz=/)]q=іW~ۺQim`]bJUW̞ͅk4oZbRs^+z5 p)eg_9X}W3CŏanNfj%ėl۶kG[zF{RU8EKZ\HPP@cb~oM'_ bo_^Ul9S+'{{8%%5=ΙVZ0(ܚ}5Fkno,j^Ĺ_Y߀_f:{o~-|Ï{'ܸ.ɖ,gVm -4YM}o?'m1}W<-7/\,F Y^NptWww'Dmf!:UTZQɨm"GMc9ꃈyVJ[6Lyj@?-!Vbyt0YŝC]w&#ǥml 鸋ihг>vͯGVo^@[r&l_<WnW^M}赝]`!6eѤCBnwSm pwnyѝ=Lqo wvfu-onզZ޺s|h_|C<"' \5~OëeZCMϿue|}wyϟkM(v3buCȍb͵gu2qc,p6w.x)`I:U)i\;vVÎlE"[?Pdc{UI,PnK!ps;̀'aU輌6(Yj5Z9EېS磏Żnp6ߝ]GQoi.->^><{2}?}nXkq9=nP)oʏZ*w}p}d6nnnT'xߧxקOڞ1A7(r=0rx{ S) SS"kTh:{ %gAjZXk/2]%N؟SE:m" +bqFib%~MD񤔚ge|U(ˆ'R3,cִ|$/<1X\esI _=W&k !) {Ҍ9O*Oa/i~S/~u7ȵ:G ^&Ll()S~ nѦJdհNtj%*hIƲr.i)v]Ca-?l<Is?Nn|ˍހS{])AT~^-SbRT;:)eT!N0CI(e G}S7'ρezh"ĎiGJL. tm;[BZ/2x2K&3S&kD;=x`@?;iTO.TZ.JćFc,/6XOsgrY,{Y p\BFQ|.e!V6VÊ+t5}NZnL1:?s*Cʖ2mQ9ЇBC0:H6rJNRԠxHb`%LxVC+tenڱ)OwMVW;9NHbvd۱v3fR,R۠M[<B2tyqKmY*x=IEq;($̊ V{^j6yfQ3 :YK\&Y2{+/XTϩ볿tabyTY.]yR颬c JgTU1T22L/8R"ڸu<5Ec9kR ۧKUW:yg[K"\pe}Lr@pٗVևLQHW.ESkUE) uNp]ל{Q"ӟ;ZyZ8L<5hhQUg`D9Y9orERaiWj~,P%hY,1=Yi"YIv +`@ɍ,7w6?땅\Nkmhȧ t~1iд_N_#ثײ'?H-Ӷ@b\-g ,e?8sg>eɍfEǼg6IkjEݎ/FafՋ.t?/JŕRR.W}NEL09]J̅TsgG^\͛CoK J cJߏ+ (+}ʨYsz8]h֣rI 958L2#0La]>toN[Ka0s/O:'W&YN?3'^L''D0#gœOAOqz> (PI9_l`a!J˯t2de\yEEyЇ\`(&4/.&nW%hU\uʰdI(C4(}+/g]ȓg~>:o rU_"suT^ L慱!CU忤W+o`~%SVjT#%0F2B@[fP=Z`kd P3k,600&ґꇶNҙ.c 30-_x+"T;m<fR O&U`%k9L%@+/0+nk Stc.Uy w*ۨ?YA˴x8,0~<[z4?`[3*g@rKFD§b[,+V?fR#M7W[Pi*7Vw݊jn1 ʜ==toAN? cXA;m,`ь LH' nv6P'.]ǫ"~`%{b~7dFqŹxV>y'|gZb0ۨs8!Wnr1l{-ǒ}/h&K0HOZ`f)(9r &En0<lH v%&x9 N<6( *BH!msKO/vT/qd.g̐XA>A+y|t(&06EáhI> B<a1:)A1|͙Z;@sT [΢"Cܙt.Ŷ.6 ҔY1MZ 8 chbέVT#J9]*!c`U .`q)iU@L-m2CJf/hl & 6ppr\X+q ;MBJ(8C6vxyKwv #UN(kmٴUr+dbyNY Q!IT!ʹ sP2 "]}Wep#;T bP15GN@ )ء`2cHb W AHRVaR 7LwZ $H2b=6|2{mM&]*拷O0bjCa/l qO3MY `*G%!YF'U42dO&l2k %^-1aZD"rR=Ӗ8~nM;ve]S 0EAXa E@2H(e$1pqg- !Bmᬿ[=@X bUeqQLYFh+ J|Vae G~R/;U>矏B#iu9 0eZ9QtA[o_ҌL$JSφ+bh f8Ͳ5Mڧ1h?iTGM JS̴J)ÑLAQY#h|)JKtIgXY躦NU,I0<)c"-[ s ϱ5..EXoGgWcrԛvӖF53X=W̊M.Zޗ6?^T5*Ə@V#IF1,-2Z*)ZF3<0%.jK ;`^R'Dj#S!FD`8,jð7:jnz!rʃ@&N6Oմ t;n+$y})TP[#Axm%s`! (,c#j2>ͷfYgo S_>UAI~.b25iw@ioWeÏnݦקIq0/7&~hr_,uc"7HN`QޝF޼iW8S|SS _YqmXA?Aiҡ/W~7] :K Ԇ!ctAkGs _f| Ⱥ(Ny_{XfTvQ.LU?cY¡FZ_T6 ׯׅ,$DS\\g5[IYʣ4X6?%lת6>D3/o;u 5d2ofMG~],|i :Ӿ/c܌YjofE2fWpiMg&8Ǥ8I GD@Y'fMRIax683-B֛azd8y/kÕh_ug oy1Znـ͆ɺي08Y\K[,_*V5v׬|4vܹYNa YV_)=6Bף*߯&?p8 $#rÀ}T~^:'ө @uA>PIp5fBͽCAmN{5,,RrdCfސ~1 M޺ǴL*1}sB۠`ŭu]΂S! 
цm%Zܮڥgun}qCUTHTڞԈQ+&#JHA&7J B}cIۋNj] av_Gag oG,uu$&f%̂9-cz$@Kq^IDۍZimƾWmvorz/ VjF]L.o?ęvϿdaEϟ<+()+HkopJ-u˼}uj[ lX64p, Pa:T~ybfUe̓jfU?d:o^e t"^mK)2 8xv6fx$8.ze'_~ɱ{W 0to*+UpRR;JiN#{WI\:\%)uǮGZa{d &qi yv$o >pu+)ޕ$ٿB4K#`Xtca) H|bFHZ%*VEfFD|q`w8q:z]=NJ ;w &'H`q2"U:wrG\z}|p~e%^j2_xzo7E> ;@_ 3 J$͖L& E\cNKL.RTF/-A 7ӫ~Y=콉D?ܙ5 QG8ko8H{mHR ViBx.ɔ_6'\FU!yTѮK2{.cy0dXs& hDo_(|\P;$}珿n OSӕRRiMU}(“tS0t)KYj,Fx"ElN%Ynw y 3} ]U9pr !b"0($`Tb"-cQt1+Q NiɸZܓIi-;vwUK٥T^*L*iSqWEZytEJ%:w ݕPpeٻ?(.j9'QoƓtM |wzFq伉u ƻ'D>g7OhUVH`d|3k>L";-_Eʕ7",Ϡ _slbC}//Tߐ]}}StW7ut% Tc0~ T(F!89@460օV?܅ Z OozjvjdfNx@<9jY~ d`B0nIQdwʙ4(h 9Y!wz1.0 l{=eُj)`4fdd, !Ia "6& \Vη[פR* ,S18$.2M2Gw$*O 2mkH59"}yb)l>^5 Rw F߷81N#v}n)w1+QןB4f hZ?s^.Z㎐E,U#W(Rw$ yV{jSŖE!rw520U&i{Hc7u'F)œWܬpWqM)ڮk97wVxZP+^g%|X5_ܺ5n޵NltJ37̂uwXw͍Wmw W\c(&V!V@ؿR@xµͱsFwd+/#lc֜+ؖ)hGs(78`Tx&i%z2嘳uзl1: !FxEMHY)YH6N,h%U.E[[g{=\ͪo{[K$$WZifnWlѻ)meE>2EN㠽I9yiDdHLj+L ʘR0 H(wY/J:& Ym5qvLL݇ d)[J sF _"/ &InXp6'aSȒLJc=F`X Mz…6!T{Bcb )zq6I^x+L)%f3}|̖e)LSqV:.)Z1r(#= :(42 4M2i~9!YmwNMC??ïJEk`v_WLc2 7To־|z׷geͿArZNe%\ fJƻ3V8aax~pgPo%RC0`6*8iU '*AKFrA@ƽ֨uYSߙG.u44,1qGmg"ftIEVIk9 |01x>[Q3K+@$*MLy[^jM .1mO62e-vٽp9nՌv4ɾ޿qzڊjZqrсБ &>E\ r5 xu;^*W[ nJ.13]JXnBق.E[vc_G _FעSi(TC@uw pa]:=-CYu#Du 񉁮ɶCZBZGZ.\3n7$|βm6Zf0o*d:Tig]b+}f2OV~e< (3"aLȢYЍcY:kuPmW17=&ey,b0R(2$)rls㹂 e#nMFD]scqߘu`J &x-)uE"Xw[0+Sn%Y2/i~i.RND<ϵTn*g2"ے5ys?R2x36G⇻CVlg:*I0FnK&Fh) gn2I׾l2I,_[k5KȒs@]`,y3ϹYjqwꐫq>զ&?,{CI]E/΋~\D=nUzvVMy[<֝fT3>YKInak5@֞ݽ%I gWrU |_KaݷA_]\Um$HF OwXÛh~t0YU4`łEGOnyu}58%*.ֆsZuC^|ʫz z5Ւx?SxudoMB2C}#_!Lr^8q&0D-ܑB敾OBܒϋ5,Y4!04j~D' ՜3AyD-=Ď9fM6A&KBtj1/LH~EuDAn)1c44I OJ_gW8wzZTd "@+4=Q}φ?60mM'SsX*WcyCOY 106zY{iVu<σP'4[VdfƜ , ;lRi2͢@bmӢ!~Z}ٷM-e2q_YmT^ Wi΄ꪆj6b吶IF!RVp wGgXwe2[Mz:v Z9n@>q<p)e+@`,z^xaC.( QBd,[}BCD);Ne3H^8*`Lʶݳ;R)!%ŕ],+K9:,86C*8ATkɽ F&s5qcD՚ABu %!'UI)4|aܝnLn^#AࣃZ2ɟhZdR> :rPWZYc΋e)l';(_̢'{.K:B{_@R!AH4$0'#9&u&YMv !#!Ch [r<')d4 U"gy[e*Fi&撓sD2(OT6Il6&Ζk!My ,?$> TG҇q:dbl.YECEX1r}4Ch6;Q9T HG4ςO!Z橏p(M^ T:T!ژ8'j<^gq\Ўs(e[ӓEI;9DcBǷ L_:l&s^|i7A$M̠FC&ýJѹ)Ky\e굎تQdZ-,.:$NBO B2N p"I%|ؘ896bj e M/23^3;Y&l.K5j֠P4HG""j' Q"J\8 سCL@e֜3A6)W#xiJ͈FP31ۂڍiG[P[u1صJ2c@!<X 4c!2>mєm(S<(`) k2J84ŭtPMSlQ4v\mu6%iM.xw.$ eL,hoJ8"} 3Cbk) 1!Mᢿ;@آ⊀4~,ȭvlk6;nH< ()).4ȥ($Z(&*Mgm;=zΑ+6=4,j3[W.)?ub`1IA-SH*v1O-F{G6PkrJw1, ǽ?gɼ9>of>A1ޘH-&.$ӆS0Jh 6ƒSI3 H*mlF%xΗMlNC3: &e&ejDzM4hL-c/ؘϏA߭/;+1o>7<•xEm[{-6dVR=9P 0L'i5P5JËx>h D$I/:DxDȺ\qAuTq`L1"S!%"b4%vĆFv5?tup;oV\OjiLޟnyAJ뻩J鯐OU(S1)BdYج_:ֻIb^+ &Dn0l/5yWnv'I.T8a<ِR#w4PE/ \H%G&dT9u0~w%րh * "u@RY5&eʅf$|JnGҪoQ'Z{dwfvr+>>}? ͗~{ՋEgGӑ ˳/b(_Nc\|;ӓCD? 
e_{/GKy?|ߞ~?Nk#gwx>|@6b{KM//ޜooMƏ9I9cpycXQh?פɇ^ft1q)t?]RrȨ:C*Mc^Z1?Og )۷e_{^vQ.L;=|e~.k2Wײb}ey(srKEIUl%dQeq٘=<^rEؒcmgmBLbp{ۋa^(_\Cg2m4/尮cpGiSǽ<8 {;J佌7$zd.Huӧ6G qO˨tt~$n Ww-~f9lQ.';pWj{&{6⹫f+sWº){U8|wʯR[VڿY&ܰv؃V M.UUx,墄|P\ ZLCQGv2!롶k$LZͽcdCW<ܝ|TMTeV PjzoZtmv1/Ys6rͅ&yu;NhCy`vwe4_xw9QseHL睊{dFI-dJR6n=*\btFⓖE/_ "6j9MұVD + :d e4&{᜖ԃH:ktRLtPlbS六Ǫ6Wj+5\3rņ]5y emO56d`Ѱ~5ysWO.)W4IRZ.!r11Cܘʴnc*sۧRZ&B[rtw;Pu)X(痳U˝l+[O{h?S2`s*E `D%<)&1Єߎ${-נwl4`6΄yrb@Q9)AA\}15沔Bc]'ScqP\eJ/pմpR&\=25JXs!*1<9=ɒIY1oz>W9.C *z $;&Pic 9WjX% *h;Lg)` 4NŒ\LI{; nexUbq ㍷zs?d4{PUuAj8 g:ƍq CfhS2'7<ͳ~ߪ׌1js]{"ݥH!)pHLmUSQ\TأV`f*/-JIW,%Vۧ x`* KYZ&WYJ \Idnl\Y`ߕ PZ)Z SR \)NY68*EwnǓx LBur*ltt`;f)Sd_nfГZgmv_s?3M)?b;8Rirr!UI3{ $73п0C-V\4gogB{DZvN-H$>xHU^}} )}}G#b7=;^QjꮾeٿWw9|?iZ{JbRIͻfWM/?`ypn3%wW=Rkj`UB7Uڶnv '$^[ãNjN7ANVT` *9V'(>䖥\{R۲t|‡:tYV>3[S {F.LgV 9{A=g7NSHifU`-I2 A76H+Ď@I 1jMP6NB@?цkg4&\k1\b\^z2fI4C{WdOk䭯+t%f^wvŵиIIcFAH.sXB t*&z hh $z8$ bJ8q%׉nhϕ;s[\2BW&b[93K~%ڧo6]ٲ[[C&۵WTͲ[]ݭ8ocq+[lwh!^p|q|3o,r58S^ܴ QǡzrՃQ|t@!2dLUZD.j[8 _QŻūk.k{nh[@ q(_zL6U$1GXHTr]$@:k֔z.hCL$]TwI42/ޕ#b4!ť4wV`ey.ښȲ#)xxġd99MCTUZbqҦ ><.@͆ɕA:J+lKi6%E@ ^by (k@Y,nt=]x<}ef; BZFJm_Q :z51uG]AucyJ Ơӡ镏bw ~)V -MJulf١#abDT!;g 8s (UHч@136)-R$}aP^峽uh>C_k윇bosDu?ں[=/duLf< W[vܲ[Jn[ozk5=k]*C9#4qsw/iсCzKޝhjo~]Dݵ9&w:^~ =Un2ܜ$>jC!ef'Hմ{\W՛.T:[14Cwz+;ϝ{?s,*% DVH%2"bvce}Z{ߛw9_7'8q?oӯW R&@6Z D Ÿb5D_=5UbiyH m."T#ĐdahV$F{` 1x>Bc%n6JF]Tc\dzfkN>mO]M;ev1{ժ 3{¶>,=Cݏ +8JemT#Ki娠<(ӑ I/UZ'u޻aY>!Y*-( 7)3*6\RaF2B1fMS^ ut(p/lX?]MtY*1`3!sIe*Z2)"DUkkwݐmqb"Xց퓾1Xl"eƜkHWeQǥh-ʺC!(.ű D NHC'1= <dZR 2`;$ܑD(^,:)W@xtNA84dR) HmiSBX gK`EȊc$ԤVNQɄ1$UTl;*0@pn,Oem8Orng*{VqFY=s>qfI=;6ɿ--|/̯ٿ.7]~?[B(%UӿJkHh$Ϸ}AΐV^tysis uuU'(5;XUչLĺq4N#j֓z[zfg_|÷G;'Wa{bxاd㏬j?K^ 6KR 8(%Ug(?Ohz0WhRƣź*[{Q,f}Jv'A!6I7~MW^_vk <^5]OƷݽst5ooInz놹4$g?lWZ+o6I4-<~}uSzGdNB] sZ g"hv]Gb|o-tlg6%ze!k+ޙu3 +բ69 x(D=N{f! o66gT@gkS<9 3#ϼH$b/&3V"y z\VSeoB(iV%,?nL3ĴL< boZ]}swm q@ݤuªe< q>7O4!4E2tN!.(,+NC|P"eK1vu\H&s )>2MΤ lEpo6& Mw&NOK٬o)sm>|3xU (S oa!wB5ٚFc6:H!hmKvJⵜM~9`w(A'o^XiǞ&7ROǕ~C#R)@h!PYXjԁ(Gx4ur9rIDY>!d] ؤ_*D_)[E, dT\[gxu;¼RICL,QGQ2Du `Xfƈ͆#X蟤zْ=hUc#C e?ja:j~ >R_Q-*\xZTuֽZT?N-چODH 5mLlO2<,x,̛$/G0'`u9ҰYZ!ʘ1Xס49+jV|1O•LNeNvu7髺ijwm ,@,dT%P01!5%^MdTDoѹ̭ZFlIxA8yp( lzr`6 |NdELFgbkl8-c;6B>(=X񆘽=+ngq>m^EΏןǣb{̚(EL$N*38Cm.m 0uQYa( ,ؖt* 2TM` `0YdkE=L^H\DLzQSͤT`/C hB:i0FlByJ),]B5V2R>%2Cfd(YtTٲIYD\4 s*|HE'UM7͆~E_ V\[D7X"nxd BːKJ!D&@1h"yIke)֜mSB̍F%+UڙBtM6*$4UJmTj8{bQn\tLJk].vqīZ<"$Z`f(H.zfRH!cQ<$*Ţ+K!V,{Hohi}=< [pnpȼUGU9OqGFE縇QQ[R$wg׋kARDd:H]B$gpz՜9Æ`bvy4<8RޞJBۼïZ%Rش} 5hJQ! 8RjClq` u!JT2D]1.0.- I!RC9#dT. j gO'i𱻙^7 b}Q؞"qJZ@,J P ݪZvqwOngI{8+ }JfTa7=c6X )qL -HTK"mLv^UzWN"xInxfTeZ7)p4_ O.-W n7A%=3xYkDhzkdClqpLz+oU(e8YhSC]`Y & ,<{:5ꗄpAg)7TIx4>GoR:D4wބ ~H+r{/gL&U\ReF @YP%ҷ}VsQsdmS*U%/+lΦV k]h V`t;HWC3I1!Q폞+nY:Z\8-.}to-BϗNL$TIB@wkvU#8 Ʌ럣n6jBGlsd*i>;x6|&t_bBfh:\~+Al*;Eap# $g\Kr1irE_}XTjʿj=_r[,dU*S\H%jkbͅ5}os3m`4}&ܺ=TG7Ao+nu=z_e7 ޺ MTʞ}sv@uָӮ~vhX X2^FU7yd\VE>%rFz0I=&D lBxcqˈ 3r7[lP[h <QXArXˁցDsD:Ù1A=pGZ!z ]<΋0)J6#%f[Iە 7Vmr~u Rq9.#%ߐiu_Ǯh!YH.93Fi&RIKq7t[#CWtL!]œc]3-DO79g)XOҵˋ离C~vEӏQkk%L,c05DM,GX5uhgoS-G Ǡ7\M^%TRaRdWGi1*^ zQrōrʿ{0^`<o"y{>>{ۇɃf:wW\/ ZFTQ4Ý di-R:g%9L;Z {J<{R YA}8In WjgLNn%mhgfGJUw7KffYLǃ"] {EV]N+,K)2s8dp:.޷+L6[CJn hƛNDHbJo5*˷FJJ*Aю\Jӿ.^jQ.$28fORfJla4.]DB_'ٸ<&hac%`]R̤aeMEdNF^(*=+y06wA ļl8n;ϝ!1gۻdxcM(_0'Q?b^/͋n jڄ)02L"+.Nh$+>F#V_6禶ԧ;%ww/Q+唖wZ'!#;a,~{K&n/n")34I]6!Jv|MkˌCTG)Jf0Cbkt.ۢ%h9t,A)N7u3 !bLEpJʍ'W J|kN5"r3\m!W Z&6\%(wIjUUWo h9N|wQ#+(_Ӷ2جCFf\IJR<=\_ixt9&/'h'nJ%̥?^q}k3}gk{磾j)1xcSG|cFP.iǺr1w ^¡n0?{o㍍&C$k)ɩґ&s&58A#.)Y{nxq*6.}HܶS+̬YB4+{\\N;҅f=i+pnqde]lbODiQ">͟S̫-QVdI҇҄+ȋxxqR"Yb)E)"KA)c"Y9턾wͿJ`!! 
tO:$UX.Hg =wTonY.t8ˡIE* F7Am+j/]ۦFiL 1V]5J*=7uV5Oky^e:9<]/#8Rx̩~ZW`4T3OO`08`ee#az$Ɨtjz&W(oB*5`X KRp?k;M'+G%QIu\1~7! )MM>2zKϚed~A% o~RF[ E*@e^X Tf=*RוyRx+om H ŖYa0kG` #L. N3/OϗMlxqo׀Q)N[ R*PbaWA0Ń3-\T0.ZبĚiiiir):ޅI|xKĪ]Gɂ'5RtS>?m[3*g  OAsKij妮F\TN^wz)X܏"HSƍA+-ҌRL(4Hd7NfyI5Xxq8yɹeO:/uA>|WWaA!Vh)Qz.36™N0g9blNo+6-^lJԊDM M W3Ksiȷ;q.- oV[ޖݞ ZѸ Jwm?`xaݺ9~2}-]0ۨs8!Wvcr1JvF 'lÉjxc‰Z4,=,ۣN.6hS'f^)QBYk45L4"2AhH EnJqTQJ !:ݑEw9 R@^'RHDc*mk\Q0L4C(_>xȶ&WZWec?FLnhI> :I "0EN93Xp^߽^2u'O<=4FO:D)1 J` {f01- >UU&Ukҕ 5I>Y-ծ/>ܵ@Kg9܉>$0JR%D1g͙[w@̑FR3JU΢k!Nٙv& 5)l"jL!# &8jK 8ܨbu4D{ R,8; SK(F[L!% 3޵$EOeYn&LfՎ.#`&)II[0?(2"ͪ꯺VV2QT gLS;\yiWNoN) 9eħ_O,#]0v+'«*GjQ_bܸ QլϫӳOs*, J*%bD"g uI;$M#7ɎcXPP+I`-OܑIL)(!fSy[勢W L KNRcp[*P]ՉK"^iTaZL7k+O, R/D>$[TN<#ޤL:x7Q'8DROa(Ȳ ,ϡ - 2%"8hH4!Ty&7i%6bl7T8ճKaV2sXɞIٲpRMaSNۼr:K\)J;2.n.K2E\]sib5Z0D2<'ؼǶmc[<1 `b┡끐'tLֳ bTTqG$<J{blJ1_XL3/T/;ƣc_h {D{#nx%-DR+fy ,)u c RrmC)Z5 6)My1e?FE5G@0fBs -M)%X>j~q^uLsŴd_h E/nx !I 02&8( D"1X)iaqT~~ha)!!%zpa!̋>cN;21T$nh5q\~|"F/M&=7wOzI{wؔٽyLrR#+C֪VWWGԵ7U9Ayg&˥br*'KBA0ER@jl&RȌY95qiBn`*Qfs]p&q.[8j(&Ύ$ϙw:M c}e\2VH݃@8SA !rt%ӌ{$)D Bީ uFg|&oȪU} "5'>7<< LɇM5څj/ӖV u<ĀQq~qgKNW 2amr>r*#Ȧ%mu?jq'_^ڢe7|FqQTw ESg?_2y{;{(q { 3f%/ͬ1BY"#cٯETh)p<]ׁiRE U2/Ju PɊ(Q`mЦ BH0^50j4+Ͻ*oY\xӨ4RN=gO?M45qc6ȟ< T}S !ICO!#%1wGk}@ݦ4 ~/3=*y*W׫]X̜W. K_feJq^!\(Guh˜%Y p9Tu=b3okf:;hr67w7|Aغl~n/ikw)&z9@m\Ll_ ڑ?_Z2R|mj/xCg`X+㬭2J}a9d}7GW˦{=Y{nnAA9Tc%9mIӠx˽λ27Z㴰eTI6:B5DzT(2/, !(v'F-DF /gMy޸l:$vhj8& %f=h{L#LSW nŷZхņɠ~V/Қ?ľ D&*R%Vɼ?Ð;=w"_˿XPnN-AOHρE@ --w\J)CTQg)Eȅ"h%TInɕ;b_]Gdoqoa2X`_];_ҿFkUx7FԔF=ԓZhz0_* Hb DZb1bom+#8,9(%RHn&&U6)>1qhNrMi٢wVTY.+3"X>LGfLU ĕ.JQJ|' x6.RWܳo:QnIg)y qE?i"n"A7U_^pUڋX AT"(}xQ#ء"Gy#hKc8A̅;RtY@EqpJ&f{X>罥Bbc+mٲEḣL(׾hk zoh %E8sBŒ%ٴELwTT֩snZFL&(Fs ؈YwHqyJ75系"(maeSS1;LVڞs[׏(@2kEI /E/G㋞LEv8ۤ~Ah]Đlj:{Z}]L$ܱ%ۚrlj5r77ב={Ur·!9[]=ix̓q7 ^.٢g\MsUխǞ?=۱9wWw }8u|s4?v~j"tT6q$^ 7w" rYc&B7RwgkT{$gX5 whRJP icQF@଍ݺ7ŏZ^jI߿v~-F/jKFB- <3EJF(Tk8C^+-".Yy-תkynX|gN)=ۏ+7RǷ{\mwbϴbl$o|c`B`pPm`hM`(W`d+ BW^#]ipI7++<@m{9KSΖ zo+jPe A/Lth7d^5A>wx%*}VBP]MCgE4 %in MS@7%ᲣWHӔQ$6tG/r3vy=16bPL@gds|~W1Z0L\5C *f[R^7z8OU/-QQ{\`4ڲ fMp ih$Rw+m9^*5tjّUBɺ3}^#] $-XU5S -M^!]'ZDW hς~Y[ xw `0%0Xkk*eV4~*TztfM*9:U r(SC*]ѷCWWOr~uu`!.y\}湫iGW̷6 tE;znjZDW83dѶUB[gNW ]B"L#[DW Jpj ]%*Tzth6">XRw/CzAEꠓO'@ O?2*SA=Wed>^?ݫıu?c)ZF2/ͬF*IW6u ;'N뗬YEH(򙖜eN)bxЄ,qg%o'_w#r?a8f q\EZh]5{q"q-tRtJ(a"@bLkV6J:zt%&M =t n{Zxg0vtzJ &E6"5t' *tP^#])-+LY{*-tƫ+@0ҕ)A*5tlIhh:]%jvYIIĜXv|'wZ~.g$4$mv]"Hg: m+@Kh:]%ut 銨珴0EжUBT*WHW;țD?3$uh5;S#71T~6(? +@aVr][Jrx_'^L}+WUԩbǬxulO]2wx_ :4TLfTdist{|oJZ4Lix?ذ4=^Ft#ȾLj@?ٮ+M6[4ŒZh_jFs{7hemFDz#o,,ԯ*:E`- .u1n6& E cXe>HJZN#9p'5sa Oz5EL 7@/?.]R"0y|iv(N>[~e$<œ؄lבHVج-H+ ԣx5'w{ǜ: I$U "nCyB8/>0znY?뙅4<~ុi:e< ypYx(f8a2?ht>)ge?.PPg}NGЈkxqS]14^,$KJG?-KoWK̅QTkbKss ]Lq}+RmE紏88]h]] G!\cc!{O\\a{MhtS"| Bi'QJ1WLtޫp 4ׯei.-fuQJ-LOQb&Í9 :70o%P0 zrtb CRp,wp4|^5aqnF]&H55q/[ ZOU (Ow𞥹W)g /5 0._p 0[Y+oݚXt_+U\a1Z~*)sc>q4pU+՛vLF^̋Nʌ ; B{'1I7C?ɽʯ }OZ0bRMnÛq=U%=uO]l,~WĐ)8>{;@Lm&wyW |gY6(Ӊ  )NT[/ak fH[VCWjY|1}kTǴ6rLn),}50?ip؞P) GʫNeQZ/Ep?CkU zWͨM2Ao `&^Q*"eCP~߿x!iln#/SJ:#2-Nn,Q.F]$j<[ , Ƹ#aJk%&RHDc685"0t߂j1C(.eiJjS^Ͼ*ൿ#G|YR$$&zYM bzAھoXi8-HJEfָl:|*\[.uHdˆ^Fg<)V]DLt%(H8HȧNnzTD=%ηW.|L]n2'Q]sÎ2aDD&p TQ͍QmQ'(_fs-ݥnz붔bvk|\30 k- h+b8'33vǕ7Kn"#JQpcx0ƒ EXm:9<*^*GtxГ1z!ੋ)1$f=3DΘ8jFZf& ?UzY9=Q4,j/Ѿ\j,A>g>\#j>)f]+L?|k; sTfRdf/Hw;]@%( ҔY1mT1p@H=[̈Kgr$Q锨P1*8tAZB7ڂ2e[0 ^X V%/i\竝̕': i˿myhfs1^=^UT \_z2̍ˣZԠ^D2˘iF+Wš#ǂYѡ3cHb@ мW AHRVaҩZ30I]YoI+>u/PvGi,90Ӟyp{<%iRMRݍCdfѪFMYQY_~e.*2{&Ύn!5V[p7C"CЂN`D/"hф%8)2K52CC*3(JOI22/br"QL^ \{a=㟭ٮbC$JJV@N }xƃZUEFmMxdژ%=)M03z]6g Ek8W%HXMXTj3Xh*cXx+Qyuakgq3N>Ot0Y? 
Fi yPQ&"g@zd 3J&DdktJ+30cE0`Sx &m,撌P2 %va\11ۂfG[Pt1؍^d :#n` p,*% Rnz61ml@rp \pG,$47I KZpxmZ쐞2ѫf\m6KX7xYbˌdzLZDYq-!A)e:1g63ǡCbkxc5𶿽v}C4};?o1xa:#y? O `N UTi%JR R=6+L=[R>~ >Ji H/6蹫b|(a# 3- ;1wRbF(9QZ`K^2e+tg,7֡X7`I,ީf7`c\bRcnsk2^[|yl{& _gS$,) !kg+^bqCPXTbR:D8"0=d tRK*H3I88JlYuWƫJC~Ƈ|RÛ 1p]UKtfA-6wF",nu ,0%Ji-t@ }eaVqxJ`]-}{{w4fp r>ahߢE7?zu!g?{F!4M?-sgʞXϤqJDэgw-f:( 3TŃMI4 Yl(h©oDcSl¥L3꼌]EzWfr]N·㗹s_a,?޿JYE/̺__czflhbPdK0.H7ipx6чS\zt6gn~*A{FɆm5.Lޅ(`NEU^/E~xS]_4v˟G"sKX8?oa3kn}-(a0GWv7H0OWZo '[pOW  ,"udfO-Q]Q.z,c2&v:]FK~qY{ϝ[/+nGݜ-W_~Hm{TR,68]۟cjܥ37I`ە>i~{^D*iwBq,&ɳ"WAD3BTX,ihYU2Ch&iz}՚Ʃ' [Um6g}x`(_'uAI0JbHCwJ2dt=)4 We~NO7D2J{RYSiψ  R{礵>Zdw] $~Lޔڕ#L3n׻ko^ݼ>א"cR~GRGRűqKVMD8LnZb2:RQ#Q*e#P*ˋ4m>E^)onxԷH:]ZU:ձ,kH, #]mǴĴiwᒴdqJ=EG_84geE E(D@ACcYz zuu[ ?Q f3TBYɈ\$Ȅ!dE2sъ+umTXTXmr[yYgTskQrʺ߇w=\$c>1 *zmAJ.3Ls@I"y=f g!$-)iVZkhp@2cuB5.Ip6 :$}A؜7FtT9ު]Y߀Gi^>~wdNU\qb^ c]YG,,O*]?Ԡ(@Qx $]jrVJah|g9rTF+s?#X#MF 2). BC봤1\9m9"PYW>M^0GiE/"}H4'ɑlN¦% z4a[u>"tJ_4K]P>kӤNэM[aJ+Sd$s.HeZ*P|^&F3< j&#mЉE#M8R5͜@%^f!ytE:6N>1tEN-V9ذԝ^B\`PaWPdR&q W,_dr:$u($NVim28I;/R0sYF ȸVіJ]~:q@j I?vMy ^_S5bͽmIIt;;`T6//e= Fi8hV5mj|qd${[9k|A#B=Y@4 8>jK X:$Y"@ea:87R)N7SCqkײ]zdj+K\uqёؑD:;zPAACnMøA26DrĦ$i1kmx2`Gg Gv=qeDYA+LHɳC魖* $ Ԟ)>3ˉ^ #u`)xr.}S7j,ӒieX=*OW2 E˜E!uqT' /fZY1((YfaW:J[ֵphӕ/؅ce_!kٮG]qJ!E`kO&ĵLJjV)UCP-+Uw%pUv*RZ +dR=!"铁"8vU'(Rj7WjW^1A^+n W+pbJW'zkJ68 $<>| $ i]fH>p=_v9._ 3x C2dh hR %˼q}f:&Yo|c&e7Urz$7 I;_lڬ-5Z ?{Hr] 1,;/~鷿HZy7UE/יVW/ w=3?C`K7;fdz{WU/W#oks?r<˓HiX;m{a 9'W׳ W;İ 5!-ƀMc_'i15Bc+b6xX)7j?<6eمO͸׻!qzeJc z6ѵm8٩ó vg(svU3c9<B\uNuCgH{nɣ6DNX?!PJ֥/<}vDw"Nި抸]Zvvy~@B-cRΥp,ͱV 1~{~K+g%ͤP :?ꋔAeמ|TYF;3Rz"4ʒe$[5Lo'myM,Z7E $S]8yz˒`٧Xvޚs uRSQ9J?ђQ=;(F1J;5-쭘 ]1ܵ6?Q]F'DWlt+Fk~Q:qWHWջ+짳6pS+F Vθ {5bDS+FhQsWHWA+L׉,bzw~0ʃ4=:hzyu`R/=վ\z>w꣫P=;}nAW@W^2VO:3bOٟ?%]BRdфJRN~QՁ ]i!§LPvn}d,`zmoޑk.:a( wjuLJe~2*Z1h{qUP:y(^ #|++K|;/PCQڇ[f:OUS+FR{l2k+^}$ +T hN徝<{BW΄@fBtŀٱGJuFMʤ4룴YFM{-/]_ݯKH A{Y$ C#5ZEr}& _ _B,DQPHڟi. |ReӖt ,R4֧D!۪ |\&a8oBUY:QS#J̱6gωT,h:xѬɵ STBf <6}dca4n );Qƪ)L>"Ƒ֑~X+iY>Q.1 2X )$ZA_<$dJ֦R N*(AK|LԣŬs֞Dqˌ1$%UVش+!KT>F L.9 /6^0J!B QȎHTddC`JR/>q*>_C`1K-O cB|ZS͡mßյ_qx(Fu%JR-*jly 1I±D6b lN}`d Xqah P i# 3=U[˺ CP旿!)ٰ" T74dkjAt- ݘ` f]@HE&rZ!d^e(椨V8 gl4.|OE# 0|IEHV/Vgx$ ې, C`QչQ,TGW>^_EQI9ܶ,9겚 N";VdƟ_#O*q>e[`I%DOBF"G!A]JS0}A^Z@mD%\uj З 7"8Bik ) 3E`JQI9Qv9 ڱ-<  &v6̨D$V N+]Kyԏ+ӣ`IȒ,\\v`m\gT)Ȃ'J!B2~ȃ\ أvGycQ5EP~T}MD1dD c]9Cє ԚKd/h>V #ᬬipI+ j@e# o JP)BM~VD鹿* 2ߨn$,B S5/VКZr` R&3+ɼ>1Y]q$>h0qfQ:Z 6Q\yH55?{WƑ a܇ݻ/"ؽKWkԑ%mQ͉jl"rtwU3tv;BKQ2 5$Z\ 9,h09@n=@Ojlf3)!VMC|̰C> .nxВMv!WU.e ` RDVi 'O "j `7Րa2M OGw+`uE"bʅs " +fo"%ׇ& \ة %1<*!d'UW=k T, .cGȌ "eYq2B15R&H @jȱz] *\#DqQI<(O2tSMI Z`5:3(Ws |jo:k /5s%*\d! A@a}33A: dB6fH+B}!0#9>dx<50j2C)pdXbJ8L*t+ ^=@p!`~?XӦW 9$bUR"8زK1MjS #!2.Hdߋ \N * \c`t·+,;g78j.2gu-ō+lk<4ΈB(D6p0H˔ey[rv W;y ԆրӹFltXtz Yn Z-au(;U׋Efyi<_ℜq[jvj!~wsޯ+_mD#D.rGPoI=ȭg$zr+ ɭHnEr+[܊V$"ɭHnEr+[܊V$"ɭHnEr+[܊V$"ɭHnEr+[܊V$"ɭHnEr+[܊V$"ɭHnEr+[܊V$zDڛ!ɭ ȭ7/K[=E7^[܊V$"ɭHnEr+[܊V$"ɭHnEr+[܊V$"ɭHnEr+[܊V$"ɭHnEr+[܊V$"ɭHnEr+[܊V$"ɭHnErǓ[Ak InݗÑ[\PVkyr+ $zr+!V$"ɭHnEr+[܊V$"ɭHnEr+[܊V$"ɭHnEr+[܊V$"ɭHnEr+[܊V$"ɭHnEr+[܊V$"ɭHnEr+[ܪGG+^aE/0]ZTJ^vQBzF)!`gr80R`aVWJIaL=$RZl~gIx~\f y=Pmm YMڕ! 
݄ XZnF~b.8Z #ZIdzw[ 9(J4n(,h[qpFASOvWiKˬR89&"30|@u@c| ~lr_)|Z_}tc8Ii=Vܘmڮd|J^^\hvrv1_-k QMۯZVw6ڨ|uMGh?s)gᡫ-:b% o"J!h}-Iz@tp Jqt(=yteL +P ڃ+D$+!% 'B ;xBHWNYg `o`)O+Dk~WDIՓ+3r@t=Ntp`#ڝ+DS@t5t89sFWw嗦_ZnhT2x'X2@WSq+5 N -nBW?v PtCẀ k3B 7NW,ӡ+ɔP (0zX1υ^3jQm%\}z>˿ nNj1I&`Y3;}w!=j㖧I8͗+N:)m Htwf ^JqHT5ٴޡ_}/8?ShSS/eV1*ј$cW661OZg僦*^w$l5O16U(rVM;M&q'uc(+klכƇ\?n_ ]{v-tR,}ǽ߄dk߾ς~&-?CVB uVtվݿ; ljyв9>{kNO˞U:jgeaM$oUXQ]iKQ)V|"ZSx*vmKLݴ>N'~Dʟ;; g`N~+y%m˵]t鶛Cj/ЉgWW_Ox;|@{nOۍ#|{ǯ|~=]?]}!@I,кo^m]{|MyZLj<05Kx8 ,,Db弈u"  xUvδ&@[{] ]c.:KQqs 1zT)e< ^Gf_K7>_WyH;'Ӭg~':՝3CL~î Ã]nq7Aȹ`nu?~\̧egNaui",~G˒ 6Z_Rƞa5 h⽑m?vߴ n1#̗_l^<=<|sNnͺ 7tTkxy6#,  <OR( XѬWG,78;j9 YT8eg6͸A6p>_Xs\'7D%ݡig:=݅fٴwgrqy6o_;W{\wKK͗W}}c| ~jխ_<Kzx,Z ddXUK% mS5OWf4 njUrSfsQPU#fÍ k*TۙX7.2˅g.4ąʅDط9FGCMk']^.j?MN煉?1c kD)lb yYm.mPT\Y-$0ߤS:Y &{! &c @djj϶BnǜBx_g 糼b kfږX"cRwx^d0[RR⁸Zx*'Jc}aƆ 2d e5dD.sՙ%ASwld|Mex0ql_q_Ft=3#F$Fy5{e! i+Q&B amg z3hry&昜*n%$ 2i '̈ߝluH:{="/* /Fddq#@.dX>kU"^| ^<>:Ň~X|i|)Ȼ vi ^ٸV tۋ[* |T {{>YAx,Ŭ٩?[giowlU|hPIKǧVxO|X~AƺR}nFbn:f W.iY޿_uUo Z!g,_䙼/GCSPwf7 qxG^_qEe'2VxyBVKmz_$ mU AUHKKɒDw6OO.M~H%GI |%?ge40mKN^O?qw0 bNv`?E0-3f]Vf* \zs.5OWGw:5@(a]n` }țٔ[g.og,.wy='5ZQϻ #&ʨZӳ_K H^3y)zsF!݇Gl9+CX u[tTZ79,>hͧ}'k1~; 17iov+`։qc?5b?2{ס7/{y4 l,wmHivHVQ.w3_b|:Fّ.o%ْW%9 lyN %M@ %WwoNTϿ>calNM|'%w^ݤӯ>׷+:nV3ºNN45S K̓zx7[pRx͙<5w2^l CaNGvH4KwF}9ܓ.&|5X ,<4lavOaq{c2&nXr߰N%_[sk͙b,כ6v'!tفr9Uб(5s'B"2''EDϋ~3(,&dߤ~QZBSdJXh fɢWg`,W$OnO&YWsܳv O~:0j=QOI}TÕ&y>UƪReJݩWRZW9r*uȺt C@՘A߅bCS#: }C{KA,rE'W䊾#zU d:}77[xalm㍒Mdҗ-F΁g酓ҋac=t Xg (2 2bYu 'Ue0;Ƃ2w xEIZ5׻ IHQL L<FSސJ׵.gP|JoHhyV=f/ow M5jҸ ?|=`n,w ǫG«h3}{u5'JZ½K蕚^STh{wp(O6?X9_~#Ñ&%I @ SIŊX 9$!"W}Q&|)`׹߳C9w7L*f9Qj/h0vuw{Bʭ7`<ǿxgч͗*6>qv xԺ9=WT<{6zQ7Y7+׻qSz Φ;|ؖa{w$g<z ?SuYՐwA &Ԍz=___6LL(DFFeJTA_/Z(*4`r RR Y9) BzY d(yE"MY8w[:g7c7ϭ'l9Oa Ƹ 6o/~Z|,G:ݹ9H DyFQU`g%j%JJG V)l%D :g \W#xsL3fOsSS DLǷ.8>YA{N:G;xs0<&L+0w&*}|i]a[Ҟ#@*˫%Eh{V'oDգea芟񙁮ͶGZGBZ 3u% >g(چ.MJxkw"؆ Bf{u)sSxrSbINP&o,hH$UfMB6BGҀMPT.nsQ,}ԑ"iќ4#3״ bqδnV*bj^e8W5|+AN8xBVR HYbVYZS9f5إ.48ԅɘ'IJCπe;O d(.*W*JUkpg8fZ68Y=\;[qG%[xE,,8HlPA!xҁeScٙ8{2yJWGc=Jnζ6+$4Y eVꥳm9y}Vq=/>**9vyf'z;JisP)[+ûp ITNh} SNKgU;1QwS1u<T"(|xH}ֳR {E]ױұq7G~е;}Vm1#3J8KU\u6 hO=R`pLDN;8\njjK Ɩ`y_ '4ՍOsuB;Y3e9@ԚDŽ [ '΀NJd@H&^HdBNZL9PWvϢ·ȵEi|Ys):n_0p(*S$&S DJ.]+&jJQHY(SJ,-^RBH2ՄAĐdaAiV$PF{: RcNT(}캂sgV{57ǝ9jd@f/5a}X7TAU6-M U-H6"m`V^ʛ6ǿ ;$8a\V#]&z~9ngOPr5^ I\SR0>d3)j3#l7,.z\ 5q\k(!Zl󣰏ǗFvwrJR2r.YwB5Fc6:H!CvsvJ Ϸ̟l`M^ЋM+(v%| J{~F7 N>Ch:`1R((ur( bzsx?$N e 2dR%Y%&l m J/tWE,J80n3!y%IC(H,QG%d//*5V@#v&n+tBҞդ%&~TwF^ {{z5bn 1zO&xu"fǎBA7^4$H62ЖWtPO,OJ lۏIMV@hmw#fDl 2{Vlxcf hT/ ܷ,Zw(ϜO4[Y:*If t1EQW9'RRȻ4|b.1= *(ӿV?`R 28 ;$#Pf1XtW&XАIEIzEV6(܎r V #'&m4ht Ɛv* a@Q,+u 4S?{B\.vV ~L՜I5\&ժ4ai&Wjy0ԩ_tKLn_o7HEv_M/P<*2=ߥ5$4kǛ~YGW%͐R:"ܿ9܅PufMk(5xmYkI1]̤z;Ue\KKg^(s[cQ]|& 4xYm=?gigqq^ғ]'ŷ_YMj$^H/KWH~2dʛNjLrLn)-9cBۇ>e)اldͪ.0yFN+Y3JnlWh:Ȯd QvJdճݮdzj6!d d jvBttut,7Z'DW&DWWT jvBFwtutʼn|;Sh*429 d D42R+lqJ6>(JJ鬐!\-S+@ki;]!ڪnGWCW(S+lh:MgA[WRu+dHWXCYBt-Mm+Di:gRUJذt+M*th4<e-g9O?s`yfp_]fhŁVT]tECOQV%DWX ]\CD*thk;]!J1cTӔ  ]ܗXx&Bܶ]!]q"jqv7=R8yiavfw7h7ˆhKaiT*4h-m;MJElGHӂRLa>d]+\寞nx9y ړ<<&?Y|i 89>S6p8>Ϊ̺Pdx+B~lEdhTB !r-fWI˸$)lŔ`K1|}uڐB͟4Ru hn-i}V2Y-EÜ 'E0糤BIXPp-MG~Qr(GHN ,76B uvc+)YWܑ$*&CW vB4׫z+,i:iPT Ѫ[Wmtt"teB'DWX ]!\L њǮe~teW*% "t+Dx QWCWlϡg}"7ġC*vXjKgF(9i]t:zSg%DWؘd S+D+ZOWmOut"t$&DWLrl2tpI-t(۶gGW/BWvpB>+QR>A'=~@ 3l3KN+&!+HG.+rh e;+E G;?<BBWP PַexJ*[,B, Jp\6̌N.':Bt(eGWHWZ唬+lӡ+YGJ::B2L)Ԏu:G&j]j!hJG8!BVBWV֯ "J9_]=l%fṕhim e۬+ހxGWz->IBYɝM-x ΀oXdhO 6F̴ M#M~l}4jg64kL̵/4r[/~5_Ζhdw#Ή2i*RQ02dBIV(ҘG9Bs L{A+{C$t(-U]!]&DWv,!\S+D<}:]!Jc;:Bu)XWt(- +KRJѩЕ? Q]]Y! ,B"BvDi;zJ9OyP`}>ZqЛlY];TRedBt$U:Bֶa#+&%@)旎I5 \&.F`5q{csqs. 
I<Z--6Zk ǥU5q5ٸͦgk,@ VdW!kymlKo?+r6 ?妜O'?D23RV, g5kTtv>_';Vn ΦU,h:F"2# (n /E40+9Å,-`~2g-ZmUșVi-98[0-r]h%(2FSA#E &fg5c)P q++LiC+> St {s KX2*eUָ`mr`z8V?饦}( a:#,c0_8 D!7#Ny ' B("="z=lN{M|`=POJ%9˕ 9A>Ps47`? ABIY^,a55/d<N.}E >>:(p2r_qTd d(gk ;ZLP=j҅qmvp5WUFDZ=7 ,W'ŻҔŔ`*7WR9Pf>YS_oW`6 w%+K}""r8 UHiv[NE\1XYpe=\|W`huynhg} Kw\U^էY1 _NSu}^=Ϊl L W*͹ͭ#04c"gFLU8adQAgJPnYqzkoSdcJvKymym`F|J3pny; 򡦌Pv^+JϳkUWMܨʌMs dG^٤:Lպ.nAYmB.b x \, >jOlr9S4?,nщA$vPTer/-[݀ ZXt蕩'%;N &cS|3\yɽW3}B  Q&`d3)Jߢawq~;c ӓ{Agf*`ž{ߟٲoA=T'bc*X,aX. FO2. 5F j&Q ū<cA5҅aڠ5{P1W="33Ifplڮ,G{͓- ag@*M˿xZ՚1e2.k8gs5?NFwf:\?1_uv)Ƽ2 "\t",cp`L s%r/GNyEyW[t?>jM:_|&ՑN^+tko;@ 䱇<ЇPy2XO13y||)ڝeZڡÛMï?mg&S>Ý#O wz0ר_/4 Hi4N @`*&v&6C),\&=p'lDIh%#bZ9[bS]* Xxx MH COTYiTeL g;\0Jty޹9 N/w=7o_潕t ̭nEbzB|fՋ]~qqܨ 29/*R3WdeCh@kX``Lb2Q'pƏ.8n8DG$D!Ib,'fΓJ9#q6MWվ*{5)2-Trqcp#H\HȂbx+j1p#d ȠzHD4P=S\Lt-Xқ8M"ӹT݈̠((& Vg˧<$$$FT(|VvAMgL4M?iqE>(jd(GU9Q}Hit>x}ONhM۔.׶Tl;\_-G2UQy{Dɤ@,V4b22Ҡc*ЦH3`mVUQ[# g%{6fAO fbF/K $Qs-8P g;2*հ Me,= oFTm~UqGb=zY/4n4| ̈4DRQ<(3 fVZ Fm(f2%po 2U:h`{A@)ۨ9!d\RRf3d礼ǂVDZQgy>Dhv[QHI 43H $9:FTDjS0q[Ł4dbH2šX" 4TR'2cVYHspaԯR 0 >ED[m="nZ<*N[4J"^d :#n` p,"J rƴҍ1ke 胅F! I 0TSk8;68O=Y 1:}q+"Oaʉe =&- ?θIyM$V!ph 񄱚xxߓvS!z6OS)??f-)>nNpKy?n+*AMBe_K 2Z͏uQ} a j~jqsqVy-_UG[) (Q75^6!1m WG棽aݠlm8?\[$ZJJ~nB}VHV}k"Z<~4}bNJ(%4Zs &[#d8Ce=ĺQkTQ%,A Bg.$YLf +9 gGwvSz 6V1c)5V6GY^~Jvi'&7ڦE)u)[PL$*ntETb2~LjjdlZ(IG&+ ++DWȤz<< ==5(ї\D+IDI^pγfdMGE6&1Sy ]waTݣ08޿0r~D#sAEZqZ{)c&2q)琊K"K}55B!E?jB If7H ?aR /l JbqHMȅ!ƖBɥF 9 ,~'\§dziu2N$QrIt>y)\bNuNAIS`xzKg]r[(ji1F2'Wd܌/}h#6 \?xNnC'.7=آbjN _)՟/̮ZWSgxJVn>O?5,o5S:Ǚv+߸X>r4Oix$zgdV aOjX塈,~gۗŃOwIE4\ČNè8A. - E[48K_/O>b?=s}]~$4*ivM';Q~&"w'bM7nZ] I;( TŃMI4 Yl(h©Lӗ+)6f u^tVvz!}*O3;ica_)y\@W0VK6b.WW8oYDE&^7XqDC0G'?]M9_'ݭOS:.}:iq.?GǧA{kF6V"vF{jÍɻ<l6Xީʋ~xЭ_c.O?N4\'9J-[p8wSKlp :zmwÈ{fٖ>9ܓtuueiɺ ꘠e{KTMg&& -]Nu{7h/ovSrG/a9Zs39+7F6YiYRRDV-.%]tLcUq}vd^D*Upq'wbcvž㔫U<ޛX>wg. kƯӺ븜{ qKL@DIg(P'Da ])* x=|+ KںMWa^FQjRНiu3ĵVY7Tڌ`0FVAݠ).cfh3;'RYfͷa[>tXx9i-灁&'>5Q![Qynb?߁tY4Kά{=|?[O0t5-rvƃ9+[7bj-ߦInQVxmaB.c~=ɷO—Ѩ2@߬;{Wk6{\f3bZ0[i]pNRiٓ_r?AvO`4th, Xxb[pt7C%g,QDgL A^n[^»w]N 7m誌tmlgh76džBKfѺbn1Н(yv>q ܼ[/mk4x.;Rw8ĝ~it6~}TϮuw_Ll JHū@8}uY ^&pLccM)M" uCjB>8Ǣ9nJwk~QA@3`t^OÈR:ssMDg̭s@M*L ʘ+E &;묗48Phhc YJvdߠR:{'W04&WEWw#oK7]r)5sn6ȒQ&@$i \ʡ&<|$)f%ؤ;bЦ %kwo727mv~I{1y.YƒA T yFWjنD@2LEٛ_0CdJb\SJUftFǦY>KŢWR)p-K(*qU:{4EMyʁBwYFYQ` Jԑk sC*bP.9T]ܫ@Bh1Dtu-YB U fUTj>Ur uֱ cvyx ~j? 
Yip:֙?b b~\FB j)˟_X%_N%\*]Jjgy'F _]}+< .'IhIE{YP;.7KdV kOYĒ—z߁~e*:&Ɂ6I/(>K vHuɛnyyǚM%7]Vw|ӶX} ۳U]|OvZ$fUvQZUѻ' 2o^^~+|X[⟏zo%Bl ~ZD:j ;Ӥ6 Œ3KBxֲ3QPr UL^5=;7ƝJgggv9's-㻐gŷ0S+tm>`=xVqlf #%WO1?}l)"U2C,hɈWMx[Jy֭Zdjw8)2͎;9ξ@\e@U, u~ %e6ۏMhG`O߮%K)rޝC*jԽV;;9с#`k/ڌd(psuy#ggEPuɤ2xghHZ')H<`+Y*yuѯ=4d.V3Ǽy'gsuk;\ϫJg@&*v&ջ쿸bN.EcE P)U fЊ] s\:(.'s>_\3)?R52f,!B #[4Y}6*ޫ9Vଐ6ڤQ] ?Z?*wٲ\vaxo=2?;_~j5ƨūrٶ*\eMdHe/U<:nACJUr,Aڈ" d-B{V_7V:jkM;lz;x{^_ݱۡ-<k=$-a῾볬x2]Y `L7i ֽ7ni 7s;6`|Gt%n ]5SҸYJnJj ]54y몡t4ҕZuDW 0nJz͚VUCIχޜ(O<-(Ft51hrǡD?-2#tء/le;+톮R \/t%hu}:] Jtf#+Tƣ눮P4ܩӕ5Jкv]NWi4Z{Ѿ}x7h-z)⫭čeƪg =hj5xpbu竗7K =0(:EOit˼ݏo7ij)p7:I*'" ,e/ o; ~N={j+yZAzu5 Vۙ+7c^:]Ot% u7tBW UCv#+Tl:+Be@/t8g:Fb3 oQ@9^%6 ʮ(#Mz@djkVX|UYWӸe=4S,0FLeO([g*˨AF?r7EEX{7x𼂑p}/YCYOJ773\Gt醮\OЕE5u %ZҦJ[n% ]5NW Vi+TOU ] \V4NW Xq#Uk+hh:]5g:BzhF}}NW %CWK381p6ǡ0t5%N,kGtءo&P``U7tCUtut%V{ ]iJuc]5`NW 63]!]ۛ{N@ ؅vO+B؝kbFxM `{8 M7~Vt3M#Mn'O?pʲazR_B'lw qZ|ȹs g[Cꈮ`g+BW ߐ54p$GWء톮\ýUC˓eJs-7l6,EJ55/ 5HӖp~Y_?~ᇵPO˿EdJi_lju.jX-#|Uۤ.Y/ۯp~:]# z>,//Hۮp}Jd:Z2ose6hbeg+tGːnfZs?6,(W|DU7PvZ6ev,[qZ޿kȜcKZۏ%mN>L('GBDYBhw̻s#eNSp'5r#s<{#4t#/vZ~ߛG{q{7 u҉v;׿mi%h) jõe@ldQ-$k6A@S̮=y?YCrPì=WkOJ%4O_4Cpvp)X]$ &aփ^ms#MJblS1-b7NI6OU::y>ҫ5gAx[b΂G^' sG !.A f|)*)&GV0*ZAA7L:A@GJ LEwf+%RI9n+XT5oڳFxwD"=dPI[m (ĩ(Hv\F*^eFT{U]%їT&}FӘy}W@BM6y-d*-G}MȲZPҰ*4]AՊX>sA ԙy% _ho{1#.EVz44U ULl^vN{N&}rvۙ8ЕŲ]u٥)8M-x zjVeڦG1*76B7#+J$W4T*#d`&SQdv֣2{:#.3(Z R|$ LjyU,3[„Lktq;X1FZi(^BddN{=zMW:PCX6\b$*UkE!vކ`^c6ՔW ˥!DL0r( ;fAjoBE!:m.XzIWKl$dip/(m*?!t+<!G 6}^ Y?ۊj+7,AQ,ٺN2Er˧O7>Py$j'c.L0Bg\u JNng3|5[Iɼ+hzvI-tzcd_imtv 0 tmtsO/8Ӌrq~v&?##Dh[z :byWjڿ޼G+ap|m[;_UC[gɰ][ 3G+Bwҟ+yn_0t#1؎ގOܼY"%> }@b> }@b> }@b> }@b> }@b> }@Q<_Y?|P>cEŒ}@b> }@b> }@b> }@b> }@b> }@b>#y!v#q4> X|@6C}@G> }@b> }@b> }@b> }@b> }@b> }@bбSv<> ~4> Jt> i> }@b> }@b> }@b> }@b> }@b> }@:Uo3jJ6׷ͭovb~R׳,  Pɶg78NؖV-Qٶt 7> У+F5Bkܡt*B0""n,~^^盠Z_\_O</hmˋV~:K3r"nyuy?(LF00a X%WMuFEoMWBPɶH[yP=xqHI?֨ʃ"6F/c3>z躘jR_2? L^6`)S pXj3B͡frjfSGN+CW7f) h:$B#ҕueDtEx\7"!:]%896w<""^mԇNWt'BʻF;="a4tQ<]JŻ6WRf4tEp ];t"Z3]!]Eo]Q1Hp ]N(%2ވSz`n(o{5ilszE76*>qsэHer4*C7H J' #T& ǴA~,tEh:t"N1]!]٨`3+;uEh+B,ҕ;""#BFp ]xtN0]#]ZpL8] pXjytE(`:B|1[5bѨ+B{kWR{<B ivc|d LW;ᑶK҉-w+tC/ nLt%b4tEp ]ZeLWGHWJn3uPztut墲w 18+u}|9Q(sdV?ч3F7 ju/h䓧߯#//ZAc07N}/{+{x}#䭑0n)IÎb~}Qgڙksj][g3ﬗӷ]$y~=w+Ǿ?;2-?4X+h8//-}V^j= QWű(BksB(aqn?A`m ~PM֟fˀ2勶E\U2ɟٰ<;|8r=/zLS!$D|M\LXr^b2}}.= ՛wxE~3ð76-VbVe8޷׷Jb=Z=nO A_1N㻫nov^Z+fn3i7??;K{_cSKeBV$1х+9}y{_״Kn]Ңu:9FER֝kSk"zk}73%2JrOy/lӸՌxn?zu3KbH+&2sݷ͐?_ ƟWW;!|r/.8GIp@&X;%O`+Z<j,(A[M쪏_UWɕ xycYEC;jQI EAfd"V##@F3X S~X]s/.zwt DQ.K{FQ?e4MMjow貧\^+TՏd/m0~ G%Jiuk")ݖv;6 fU=F[w8>-(elүeѵs]C򪶒.- <ޞd gW& pCٗ& 3ӽ,婠,J˄':F !MhMkA eI@Ui@7h:E!3'ZZFb<M;\>%1xVh.of[9F]^rEL>5owm_-w6԰ *eVA;]c=+󫣰m޷ճV1i#s/W%1|$j?w'5;y[%kAJQk T$ ^^)W#+JN(KyfHE(!4 [x@ZKRDc4 U"gy[銲WQ4NScp@ :g(@,CQMnpR9& bf+ dҰMl˓H/{=$?? 
3 UꈐV#N'"SpMx7Q <"~ 5DFtT$HBS(]^ T:"6b6vFIC;h|پ^gӼ.}:=YHHV WV v#j_/Z6)'Qdhms:9m6v]SGBwky%Sک!r=M̠FC&ýJѹ)ɥ{\e굎ت(2JR@s$,S}cƠS4 R6!HFbFr\ӌCPBy£bR<,3"/S6͓?oP`~OVzM(y#azxDrH۴BJި\%xrY}N'<NCvl`;6T9ZBPз#$&DҥgJhV\SAbq*Ψ}f[y.hj* DH5{T.%s&tx%]xT;-(̐zўGX"EDXL-ٍS?I?OǖҏCQFD}F3"nxERIf (<!K7f A W)SlRHG$AKg\p^CQRőx(nPI5ٍo">jԝqR0.3.qqƣsS$PP =CgdA{%T(D{ I,D,3.OKiAxhze#@زsCB}33*^r#lwQy{$_(QPݰ^?[ci@cްpcj![;PF{99'=~iIwܒng%p9 [YCzp2 j k¦h7Bi'yzc"dLN( ?JIFJTik%J AJ:D欜j4!]7L,h5T"=&b { 3s⬯ǓѯT/cwF}n{]s{/k8/.jAd^7,Fi׃69[ƀ0@D™GA˝030|)ซR0wTQH@SHXA8,8-j罦0YC`l1`!]B=n03,ėJx6.B䝿9'rW2 wym[ ]F=1Ɵ㇤{NRީil~\U gUa3䔟FX FڔFzx v's&L؞bN?W`I0uVt*^hzbɑ@˳`hXo߱+!iwP?g~\m,ӭFjFr`#kė}{DW?_U3kA#w3.؞=ri FsV0Wwfm'}r_O*wN,8 Qj5"VT il Moz8#y&"]47W7_6Χ'<03Bc9e|Eu=CF`cur-8Yk?((iЫlqř'ےC='τմ-uJݽcam7҅' ]#ԕ-6LPjz(Xvm~9G3W=Eo½nvl)zYq%hΡќn+goq+ݛ꨹2$Uʼ:&&T)Im"`PW.1BQ"IJETUZt/xN{^&X@g_'@cLZFc6-Ao zE@/kOo&NgӬn]y“ǪS.˨?pr9xҹz m Os9``7sI$)~ӦGO~Scɝc>xx)U~¢`^+>k|[MMG]-l^]]2wz;RA0Lϒ sLxnnAou(䕫k1;>Pمy0_N~bPvF A!ފuJBBޕ?(wvY8-<W"|n|MhJFHDV+&CH4nTpCN#(Gj]2I>Bq`6\Sx_7h;V_U/3yF> &_Κe3U? Mn_/dKĆx R5eT}j祩}N&zgY>D#9?J$Odnī.b; ui!L(tTHu=)㛖G_!h6o׍&#љef'yjyd!F!E+wӑ-.) E޹@T6~&"?Mvz;bP)j?n06Bnm^6^Lia){3Y u=o]0=v͍$1sܮvmzиD畖a2moquw:;]:Y̳|G]!9+~]/nq}G7vocEh EPGyjrB,%4r)x]s|2aas14~ۦsyj14vH‘BZɲ9[a:!~|XέȠ$uh s ДmDMt ̈́R 'ƃjQ8"F24CB󐊉gk'Try+YqqoFD#oKˮa)UD3D Q$wk<ӌFT* %EȻk[ 5FY#qo 1kBt:Q&lu:hW%]:*\LH~rJ"@@L^+AUB*-J^R[DXa,'hN O8᭵Q*ÔeZe4HRrI4$3+BK]lNXw ]{|hKx{Z& =,u XijmAVkΠF9^(@N<@'w襵S5.Ôxf "PLE7HHSxL!% 5*0et$,DqŜpe%)ĘϦ 2Z1qv34?56 7zR_zAi0:&%5"@:Xd)X ҩ6$}$hg͔ f? d) I+\DaQ@޵q$eO8܏G 0' 6 K+uHZ~3$EJ=Gi<{*)dI"%#0DKMzҁzBj[v2]rUL`:E/f87 o)OlÇXb&;Q2F<h\ 1lN,}|I .3H4sR=${ly--!ûU UpsQZߔ|  mτEO冽竽6W.y+~?4/d20 {t|<{[i*p6M$p~Qq]&Y>JΩ9EC&[*FA*_Ml$X@I1^5]%K7y׶Gt$2έ\Ef ո_Cݯ}oM? jyj}=xLO׿TIgm6W$]0 qc$t:@zyLTϽӃm^W/pE Xxx kM&i`cj68W4ɍ{/*]6ߍ';& u/HT:wڤ{/oLlr;,T]9+^sq[VpI/X RgLҚãAePCYW{ _2=mזG- l5+%K x.9裶t33R'$Qu:qG g!/fW.T?x7rn_s ؿ&]:ײ$٧!0OS7ۭ4:z&tQՋ^T@z>=nqwI_G4MFz.ļ 1ż.W$D8ںӚ݀SךR=\* (OEIQk0Y-eӎ u.溴s9`U'n2ꮝ_Tq\PmW;n]@Wx?Mw ti=B!#q-Iɢ ߅Y-K魲tw ̛eAO, pZ衕ce랲 L01JBYɈ\$Ȅ!dei+M+Vu|fr_lH%tzaAa7yYqz m.TX"Od 췽u^[#)Pa6$c֐|&B8% @29cuB5.Ip6 :$}A؜7Ft$T90wQȹYY_+pungkh+p][}@_1x}z▞g.n{,7;O_SYӑ#60TPON=fnI : dɸH[[ooImk-D"cf[-U0b@y>3K6,[~վJ[;nݫ~m+yͶ{%LH(dV`.FY1fƠEtYf[FȤb2;UPU'.A%0Z#,l"{B&Jψ4Yqrn}sms}$)0:!0V&SlT@kJē#AH s*fp+<(S[l9vުdF$l:V p" T C25Zs Zy9YĽms΄0 AIY0 &zf РYpAWӈN &ga>IBAJU G>r%$)V n݊n}o]Eqܘ`*U8%@sV8Y,r=;> yؤ??]z,Zq--.=~>Vk"XWML!}/*7ջ)6:?9ݨQkTWSC/3)jX߲I* &6PTE]^6^SNDK^[+3;%׃Ul],41L.^pK*YɪKZ⒉Sd\}7??9O#Z͒sK%L`Ʀ&8Y|7Ejx|&WEbjpc዗N]D3j: \UѬpލ,muaX/֫aM3o\ֆk$wc<ʤ_͉̏o^h,_s>m;u-El6lnQ:E@]׈׀|:NBx25Mƣˉx~|`׿u9Ȯv6✓R1W+)_>}7^MDE\b b[H='mqYh T˼6 #-)iLʒŝn_0tjvT39gƃd)1=xQ;1Zz$Î9f %!Pt֍|x̖K^G$::F6Iٕ"; -*aEYnKxb:#K_GAz/mªaLhu6>+_}@&k˴ApL* C]kMՒ`K֣IF!RVp d3lG%d VMZan/wI'N'۵K^s{k2W|soOzLn=p-^ <%-$AWh4VcI3o/»@o$hTVKn7 nMb7{]o[ލ`-}׶V*[2< ;yąZ*{xtفbLXbmi"XQӅ\ ]M*E6?)gfn*5>^4Qm$c8M4d_PYmG>Opi`ڞ4Qs\d6)*5/ٻSm$Eb!_)ʣW{*':q5W,7_sa wpwrk[ֶP9pIk@2K1B0QWD.P [)Taַ4hv,uՐ3P+ԶBڪ㠮ue "BgwfAPkͶB%෨r-Z1˶EIMRt!xL꼐F)ҵloqXCޥ:g!h G{KOO{и.m(d5i^!1mBT޺]weflܘ4TЪieZhf\tbI34Zs //:B։3T >F(mS= /ct/0#~[kP稗:GctOG$:[ K?ɇkFi+̢\REt^`Z$?a6Y-m2i-t@ Q@0r Hp%@LznP<(t6S k<%R~Uc|dd&MȂ+SŹ>X*UxpÑYCɳ"SPAN)+u )o5:W,ZӚG21{WEy3oTwqtOzt9%* Rv^Wu]ちTjy<'<^Uٷīfdv?ڼ"]g ).1(#8Npw)Q<} لd@˲Ft{pg {)=ܗ\A8^N'h*CyCmUvD{{7gy撖=n1K@K=7=2 eN"I+0>L d)m17k7>;>N7o[`vY3u=.q2a/e8}Ne?qY`$}1wV>R$w>/f_?&F#!c)m9kHFLEBYzeXz{-YCA=sRYՙ#G,H+\gRIVX KP<$ܢ#`Tɻ@T YkJ૖2&!P^rFBw~˻HSI}8hw;4n_~=_Y#n}1=^}%1T>-ѓEhys>E_V@ bl h ,7Af!zkL3;s ^>z b;"s_gI&,ca(ROMlu]b2,w)!EEQ)d.s>w*h^s`N`{;p #mІ^ǔ`fPfx4an48$&3} EU7W'ӡ-ʕ]DϊO <"Z$Fׁl$+/j_x>kKqVk'ӯ]I6Oz ϘdM''4'm(a`c{ڞzjoI}|T1@,hdd,aBT8S%HadM[#Ȑ7h ;VՌښ.&`S\X`&eDl@ 
1i\Ϥjkj׌J5]Xmf Me]{]xrUA[r@[5)n_q1oeƏF/;c_Xc{ d5y]$*I$plJkA֨0$' C]SZ{> &CHmJmJZ LUe1hlZa2NwL$ך]ڦ=݉S<9 3nvB2M@3@J˙P6t%jɱ.%GV@2!CdEGtMB #(‡UbQ}e}XM54E1v֚jD[Y#^#q׌Iq2b")΁!ywA 'l` `YRJGHS7rƴQ.keEτԨH"Y,i qkgFlI} 8_kvYm]ezŝ3^fdq0,Pd'3%i= BeH.gSBy^<^>5;҇>*lc#z1x ׉~tLZh K>bl:_K+ȍ6L5oV\QYYYYFQp!:y&@/M!iK &R&1d)X;"Kk9 B9d,bO184I1\Nn {~Q-qgV$}+vͼSZwNi_49}̯ws+#S'CGi44of>ElJ- l:<׃$y*'.S딜C8Xag*Vxx24ڼJtnmt( =,K_.pt~l{wxȾy4=E2aL;ڃlέTƽuyɉX4Y#et|M)Us8Q'_]XUzգ(kAc_G]w7i"N@lUy߯X2"&n8ccYƑnG#O Oϧ;ϧ;pL01%uf%i&&13MIZǵ4IfstnrJ85 ک3a:^x?V\n15jqlsPp-^qQ-HEƃ(8g tPx@nt.k5VcGU&c;s%xNEo#CQ،#ZzZT9Qٿ7o`&Ep{͍ͮ/VtM nW+Y=z$0opWROH zEJ%zﰞSwR($DcP&MJC!I&9Tz}.t:B+seȐ+Cdm)Nd0fƓ9&nuFfH咂eQcd*͝t '@Cܜ~;clq.Y? ~j LtHNmv{5}z*Gb~(ӣW'Z2iJ7!jD%cp%oc\ߓYRl~mul0jXVo!{( S9ٞjZ}n}ك-HDv5l_p…_,OID[ tyBZZV] Gq-⬙\;վ#dCF%a=+0W#YW{],#{pbts<6b'Iw(/luJnqߨe~+N楞ϢHL}61,jPKfzw4NovahRˆZiv>Wzr|(>Q}CAT=GU'{mի=Ssv93oۼRb D^`pyv}WΠهmK2c!t4%`b#'ca)@;Je;;Q.3<`^3`AO9DN zԴˬ뤲B="7$8[oC)0[*(yBdɱTN8ƆhH]Cd\hiMVoK7]z%5sn6$Q3%(ZZkϙGAeJM4u`-x_*1)'cw2usZަdQEB}yip3:bbYꓙD k2FicRWǁaȣWwd) ٻӽ]&0^4ݮHO J,/i<*s c)[,U唧!i}rlBjHqY\tu'8 =;^1g4 Yuat"I&I5`hАB 33wu/Su(Swmـ%F}HK0gQxJ\DUkQ,L)fS27:".y !FRY$iG8 ZFpfA!1 ^;8v]Kd4]޻XpVb/&FsyNP31cGO$q3[3ُ!HVz+3s9ID("IqZ491(,,iI`lpTQY)6wf(}ѬU6N=\X8r}` X+0߼g'0L[ [Rzgx7u4DlFD^>O4I>3ii`.ieS~uhu܇{wp*ΠJ@ '#ᥗ=pMX~*Xrg{8W;[C8{`/A F?%)s(aV _ȑDZ#q=ՏUuW0"|]d)r0 $G ylΠDE.+H}7:\ (;&|X%25$l< o7ixo o{v򝧋Wї-dhdmm9z]B=%c'kO~F| .|v&Xܣդtr3"BB*9)z9O>}h'O9<fQ)H9-GƍF:,18DILzs^Z9F8rꈶVsaHVz;~*ӌ.0Ejb7fU/ `tyf0 K*z^܌2{^.%'$weS^ϳ)?ovfO*Wy &OH<0.Y"?!0Y(;0%0='J2F?eI9su5fjG8:Itz5lv ]OӺgW&Z1'?T^6_k2'~ppqY+3y]rF6t3'vG8-o"M\Ukhkj}F0ѸVSj<~Mtxj048A[J֖r"])> Ղ⧛/ߍ\3`y0YC9̾輪7]TTd^F  p7)YUk#^SkoTc- qcĎ83Ҳ` FR@ sD!DqV } 7caiZGF*@,":g $6X?ӛCIPٞ8;zm@Y zdV<^枇R=< ߣOB^xm=բ/~Ƒ{3|-I0x5|݋`L,GR駟_~~?j}ZjgI9ݤ8(k_X[ =HՊ*%Bϻ.F9/yܡ AV?D.J³u4]޶VGo$ϧ́ 909VaV{>*]ԝ5O~2^0[ӢNLߙLFh\Ǣ,kb1%WO ؍uד ;7K\d VtOOkj1 "qHc[9F-SF>&bLKg;ݨ{xG^:`[*# Ƥ5%0.;h61BO,*[=JN"ZA&N}Mgas|ڐE)(?þDRIFx\ 5DŽ`ш+ yo%ZEwWP\ɳ+L> XU*S{cW|˥'9|yt(){ǑGk㨕/qTP!x/v]z#W`q<*+ɱL.2Z+jM]\er>qUR^\BqŨBd@", Ϙ>(IB1nؒi`0G+H y\t>ʋG_ /Y}D;<EȵE3(QAԩwLٓm!ZnۄIZ*Vy܎w TDY|L/Q%pV~y{NYG3iY{K[2.REeZ1wAl"&$1AbX'-uڵ{lѶuU?.a(8P|b #*IQ~&sݻۖ^FGϣ$uj.M(1p/۾g;k{x0kSSR)0PK z Cܒ]Q6eH\=ZMGmoGHo/~\=Qփ(kA? #]ʶGZ/t.iTK!r]-IKd7)dHcϔvD{n28svƐ; ٶXIRdr䌭/EKY8Xˣ`sLz!ʳ!hۂ,Sǂiϩ".DXPq*&כj o(CglY>vGד񗯇TA^4dpq`4ۮZdou YO[|UY2^'uU1)YebP4%3I 0P zvFFC4=h %N(^2 L<spg|,/ս#N\&vs)䜍(%!zG9Dz E]P];ҽ ^c44q$K5pe1ht\ M\Elb">P<@tf(Ek18N :gLyQM`2kQٱ9[nZ'VL%#$D.X)2SP9^n!Q HsZVq߮~Oa\Z F~|2Tp X(mJޔrg *eSiVN΄V8zs}1۸&oZ$cUDKA'3hBƷ+tEq`{qϋfZj{XxCblϐM5Ʃ>P-v{3N,U,Tx@ [vI&323**"LGv4eFZrdv䫫UXI 2tfi}s` T-Wwym\dmNilk"7Vu:tmfs׹׏m4?>e3 Z#h]?Fut:F٬:ZfnÝ7-z^jy?ds{Ocfσ@~xӯg7;jo;|hkΆ?k!]ZʰO=ZmBapt 0 |wKtUU6ΠiT{a^#.B9 v{?"N=ODK5N.3,ZePpu b cNRrMc'дSYv1PAi㹳j޵6$"_9`GkACd;,=ȒWuwO5)ɒm^Y0LRfO")+.ȂA 2m:w JʽBv6La+ߥ7Y~@o=]Bf)]-Ƀy].+$Rd92ӽ2I\DI($,|A{!MH,=GS}{e 7,Cb[ >]P!S тׅt:oJU)N(1f]17mr1q4<a:pOg1F.(7XR LLt x[s ,#]Mɝ.gdЃt7۪wܪer%iM#]z5|wf3csZ9H ެot Pac 8TMu;6l tneJeJ XS1,*Ff\MKI^ࣃЯ.0OB8T9D<B 3JQ{sLƒ1C&*wW_ӹuc4Ւ85d|H!DFJؖ92$I0њϙ} S̟SPP;R Ʋd /<墙tp>aNaɃP(h6"Bݤn(a1ၳ%f)%r!DX}X"U)[hX7_auL*Aْ@/DpY͊JQ!IͼWD,# C[ûU78Fw3ka}:<+xNc7U*aZzWi 1U5!z$I c?rZ=5kQ+<z P(E)Ya  DeHh=_ JBaV[JУ5;xD,T"ݐPI}d8s9svݤ9X/yu{%r?0u+Jګayr3#~!rS )?,{HAl.7):MXWN]z oDr-9W (9WatIֲĠڠ!lF Hb*KiM$ K–O?LN vUӠʇC?}F A*] 85P-<,f0"@9(XT ч0#ieMT#lh%i0kU"ȘULtng-Zz -\cS|s-vdbYO±Cl }亓2ڮd|.ʭT=epi}(5`m/ ($̀83fpf%Y n#1*. 
`r9E4>Kmf̊A 1+4w&x±^9+?{o$8،Qf: '$H)LšN HFQ }9=Y9Kv,Wݒ`,&j;IdY1S״'.eI4yXzfSѲ4*$,eQS%ɬ}' >bRQGsUJa\=J49A&k/$<`()H2}JBEH=fcVZ"Km=ÀFʘ;1ڍ?[BU.GV:''';g:_U}7YѬ4:y.&3OWn٢'Mcts4CHihmU7ߤA/I}iʜ464ۣ]r/_},$VZk̏JMZ@Fi\qw>.c:ΰs(MթY=/t6Y|j[Q%KY[~U:e7׳oUL*,w3]7=6]~e϶}Hb5 &X,I-ѩ8:vVkQ\$;X,4s \rH2f!(Ey~҉.~/ʵ4?k_ﶫ_LԱJ+}VpWW]z:*z͚)  `9.`kK5<'Օ ̏ޯ Nv2't^fݺp;(&g->η ;[HR #Fav ̄HEj/us|eЫ1'ӳΨ*q󨛇<4gia) uh矿_. ySuW˳ 5 f|3 :]MҬdYXo}ͯ(L몾BP_0J)\-D MF%=b"Ff)+ 1R#~jt:{3B,b!!(@ޓV*C[bͦI)J!bYae`EgCM =FmxmՎ}X5yPUR0Ql%!CחmSG|?E.w*(ȱ z3;~m'߬&oؼIS9;_W_~t݆e\^KfEУ Scoz3: __߽_lGK8!=|\~=Fŀ&/om&i+r1j7|s6=dQYҨAnT&'l8n2qVP׽|=lbȷRk..>yꄃ<]HR%&B@fkSQʀuBNXdȉtv#¼kcsH_?M/?{5#P^kywFѻ}ԍYu}TB1 @Iִ|. h;y| J}r>[ F:Y̛ɧnf)Bt^ {`,dE[+M.z OPEu6xiV0ZI S״ts]Rc 9Aؾ`t" KPI,`brt,ЀSct|$y

YRPJ$Թ}"DQb8+h,9T^\×Аwk@k@ɮV&G09xg%eA+6D2D$'0M ^\kaMJ C*dTy$IֺF9d)00MJ!12) o#DV`oӹê@l:M織ɼsk̺:q|l 4x@}ss1#^}&jUxhhWCOqt$x~'=W֩1>5Q ΑPz^WÈq5^12B ҘE!ff)I1ѩȘ2U lm:HdMQ^eZ5_9$IUEJpE267d]Rgt6pR$lfǗ"RCʱBMh3DCrQ* &ɋA [j56C[C@jOiJd ),3Ab&qMl9'.G?u_ӹQxzҲIy\lӇ*쭮TotrϺRR7…,Z/wN&j\N._6} %ӫT=2`dQ)#] CVά-N%G_իddK›JcR\`KH #/Jܷdm:KJo7ʸ,t=B;Gg;;@wͣ*Vl'[Ml(e6Q X lP3`5*F}N8 $75:xQ bbS?t*M#`K"}G5%vZq:"(G{#N2Dl(6(-,֠jiR ӹ2V FRRĊ.J`+:kr,kDϖ#f"&,Em$I¸3wa/] n&L쇌a"iD'~$%˶$K2H1)h2is?N4N"ƽcmQ5̈eĖWI<F\%t~>W}a˶/_@#Ȇֹ^,h[t–iD=>-@B\׵=Jy:6)Di\)e82IIQY#h|Gv;87T9)'֧ 3V[~b >z(J8a8",JF1,-2Z*׽)ZF3=0%vIo[b-1<b Z,1ȃ*ĈH@Tl22썲)LSr/h4Rom`1wՔY%.WTxi%wSY RA nDᵕ́o)OXE f`8=)O33/|K=KeyڦrT˃72Xc ckQ ׮eF'ܟ5 #;3k=UKՑtc愰##Kbt!#b=QZߘ_/JDYoKtv] !C(jڳ7ِ'o)h| b-8)\HD-$Hya\FҔ"lK6iӍ4vh^QV'e4Lq'URA?d32g׃0'at|D3鍏7*0v Кnj\CV=dn5g]jF ΀_FWTWVLJ҈w?ߴ$42q<T 3XuO.S|k.Sp3P|T:-WܲZ\8_,n=lgo$-B_wgИޙu+PUSlQv4)LoN#>!_+}6g3l򸄢waL)CmsSTקoMjCȿFʱ6$Qs-9˝R 3<?'` Sݢ=O}~VY;ZijMxHiɸ*0w{CX2g|Dv6t}vr}:D$NFsìuϤ?:ɨoz1ݖ\%>|xS|6GgYͫI`1qmy)YQ<)ĘTdNR]e\dze?Ҹ).~ qKH_Un߱TwU0'΋AM]vOp[7]K0u/k6X7R}4lzjaIw/= )ods,br{jŋ}NK/sjGjNsә3v%ySvBUTH'LHR) {E!,#FP*zgQDNj"jh+p'@<QXrXJȁH LD<7J:-czvKz^@"'ýi֔lzVf½}>FW(wד4Y̳aAivtQm]ԫ堧/?OQux7iy0z`.~z'04fDvO^PT{Vfq^c*N9HtWf``d~Sc?(]w])C'5qGi"lǍY#'-xuzn^}RCSSYqw$gY;_uԁ>J@iK_IQN)93Zv#ɦ+t!Oc̱|<C$,=Jn%vٰ̈́w,d: 3o\sZFrJ.l6O٥BQ*IEk-,< RGT_a3'܄$@R_<*992nٯ0Tͫ3:\7Rӭq8i!->>΃_\K{%-HN㻙sxeԗfӼ7!ǩL,c05 s+&9NxQ 9^$.{.A}o4me̖mWmUnn7paVs+uM0AEu$Enf 1^rÜ9Q1(O s58YUjEkl;teHc4JC4SS*b "`9cZ(8L>mxJqT92J#"( S-B\$JH!M/h Vm /mʆ׵\x]}1 ls{, œ`k|'G)1t!ӧ$E n1]cLϿƿw̑FR@9NF;pdqgZ{g˶l VmݯU-{l'+[biT M E* V;f31s&r8(qh1XU ^`qc*h0L-m2e[0SAAcæ`cpXxy,@8y!-Ю3vpV2]bwӀV捘^>Fjhz2svTOs+BBΕs9犡 &e@E}kzl{s@zKk䊀)ȱ5OAz0kQ 97ؼ [a^e&#QHYu2LwZ $J2b=6MVHKDkcp XI{c{_iK0ZąGKUQ@7[x#,M:0<>lia 9N`.0 J"Iry‚'%RaEm G3anߌr7^KK'˨$Q[s29.c[KY_IeYb*ڋl4)DD3 u2k#`h(-6:#-Nw5ʌBTZI@H1 .-I%bd^`A Ɯ$436~flVi mP6̅r Qjx,ŐewG[\n+w~uCnz^c`56h qOg 6e7Hr Țu:A7$gx8h yI|;t$LEt!"'U3M\}aƤc_X[Z+m\hVXhB5H8 \ +Xiwȅ4*x`b^A ('p{h2is?N2Fݴ=۔|lˈaFT-#J7Dk&1X{k’(pڀmrPsNHlP 7Q|1)DXR0Hx҄Ij$fܦjs?#~0xkڧƤd[^ nyŕOә"L x xaA!?{Ƒ nv~?{@ n36 8!twuK@q'?OYtip2/a3? 3sT  j_KO o+:Պw^& Gr8;$꜑[\??IVɠM|}O0[k0_B#-b*2-zo?4蚬z%,yqStnִpNW 2j# :0ҋZQ]>?|vp3>Y0}¿9hp'ss=c~:d*}.?ߦW=J\%·'n(B;r?59*Owt$#r_ox"U}Xd (1hpr0.F(¥G!N|hP>䧙|ZUarXd4^Ae, S Η ޼n}XA '*l/KF$b4_5~W9ܭ_4\?"69mXL$m;MIKo %Wb캹玶G5U6 Ԇb9aY-ԦfHjocqdgλcN p=Ԭݾ:jа@{O z21o=cj*/cyǍVs}rNn :ц'rbӋ"b12qNˏiܩoG!ttjʂ@䪶ב?"ﶅ9um+wCi ƒFBMG-AdX8"sF4yݓuue>CW [:=ϧt.dqY~BFE.~Vozջeq24i[m'>c{q'7Z1H#MV >[nkpPPzQ{1oѪ!TOr.RGy1+]\06⢊%M^?ߠ^UC޼zZ&\6c>?`aNN)w9a9#Wگ~qzZjWPqTZvkEI")wAPt|V;QKjXW ~PB D"eUxmC汕]-zYxV(eC-nk8x[}T~Mʻ2 huՋZ]TABr6Ww7|Au"{ ^+='г܍(פ^Ϧ7(gZտ<_EXZ0WBD ι/R.A`B{f֍(½Fmۈ+:Ϟl{ C%p{p\RUtvv:!I/ڀ[ܧ( A.,Q0$pm<n}uAEꂪ4C -DYD48s1$KYD¸ !KukJj )8nI_ץ(Gfu;ӫuvM+tg[..ڂض b~c^p6H)KN;yd)$,w,RIcUvȐ688X*cΪh)U:"D+3"X>Lj͘ ;dH۝Z#g?oY/g"5d^{uVbMQh+z,K~۷,TW)-X "r(wH "7{D-c(Bb]HK#Hk U,H( rI. ˒Бؘh4hjŝ:J&f =罥BDDyBJ{ZƝB h", x19rKH4C24¶xE!!Uܻu͔1N>1Gl> +gqlshv9B)7mHE[lr]dcV;Bbs4ft? eּ,(pO ceBymeޗ'\EPsYG2 *-?!} 3VdVTDgٶm% ^g Ϋ{ꀌ-wn*\>lK8b_:u7clGz+MZou6nZd8u{oIZUQ/SW\l_h&#?l.GoVq5wm7ݵ_|; 47Fٕ)/J\Cߕnmu-u㫃;=,_=;1g'r*t 8aFCa Ù(>)'r;躗zd;z;0A9ܠ$0-H,H'np1AJj¶ $Yp kUõ$rzb1CE`\ُx9G݀l< ]u6š2<E|H:D 똢6 7C$gfsf A)甘Md>/QHN@Hp&EfbHY!QSl7v(~PDhB]vD!c:7$(3oΥM}:9,F{[T>zgx7y :̋dX _>$vj8&bJzʑ LX}8܀[Vp6݁@wzM&tU-o>܂WP(ФE9[([xmr? \O}Q.XϾe;Z;. RpF<)Q)#,Jrk ԵrǽY 7tkIws g}@f^ܹޕRm<@PoSzTKnN4PHEIPHyhwNFUuBYh\ᕐ1oAg'd(ld U> l. 
x-1gc q.eIփr;b 1(rƯ>?;jOdiM|%ktnːP ĤGW}mj1.\qT2g|4Y㙃铦 |MF5܂B (d<^(gOKdXAJObJO'hPHZNzN$M|*QXU؜Oh6-. \U4] )ԘVu`Nb4x b6Ądu ĥDs`wԥtH=18M:U Z]:K)8 qKyS pѻxM!zF'tda!ESL$]bhHC}t ӞQNQƵ(,:^(~hTN{/2&-t bGENqz3Kp8V(KD\) B4xP 0Q_xX'NRDuXfO}7]Qu9eGU^;00y}MM).9+|,㦷^xof"?^V="Z0h-`^g[[eCM ԄĎo޷cTNɓ4NT $`O$!2.cA5I8@(O}y,>$f/ bXţy Xέm/ؙl9Pi$V#ڭj^pq !N) Z$ ш28ADKHjƤҊ68Bઔq LQۗcTNth@wR$2cXdI a nFE SI'µV^.X FIL0$ &=EeZ'*-4ENGU\cRL/$fvQVS^m1eW|=ȥkQl7ju遉Ogϋ\sg.i.[2$g2aљ*O2 Q(%`-(4f8b*Sʺh.r.ٟhnDk $Z-gάWmKlg V +NUBHH,9ƅEcQH4qAFlBmfۇ~MKY?ͷ룹sw|`ሤ X>^R*23S\ǕS0KKJ,IǙxⴓDyj߃PQ2dF?-VAA^#ErZfxOL% ]^֡~6~mzr V㿍xL{K4|0SQէ1Zh wNMo6n.izŀ pRPZ8`^ }>V"< 8~Bp*"XIX/)#(8Q Ko F*˃ǻ_ƣa/ŭk4`q,w$zI]1tr^+f ])H9QAoywPkD%/W0l]o[[pAO^fkc]t53}~uQ[CT߿z:p߃3d0'Vǝ)yݸ#ů*nh vtxB%RIʹ>[Ɔ'} n wrc(Q~}o a#ykhI񋱹IIqde؄*bsvƜC .*-Eff䞨.ݛW sil޶<\ٌi"Iˌ=&cfĈH2b1bRG\o %xWcJ{8_!!w5R }u NdN ^mLRrUϐԈ!9Ĉa3Þs^K\?w'Ş;EW AKݢ%8UJkM5m5`z۬Rt*uWAkG7Wdqt340FVD\s8%'kIH͢< {%C[ Q*5wHj*QJ߰\HAw|@I*s;*J˨j!U?^= NVF]Gm?(ah3̘&SnjYʌ&+؏%UʯBI]39@IywNUw"hK}aoàSE&0B_-j^[}-_{Yns*N+tL{&ضz)JmIrw]ӚTȲF1Vˇ{ яu6z₣ǺKgm׵߇5MxQjw"G/p:,θzXm*J'܁60qY=W*yƠȭz퍯ޜ Xf(Us38 *.~8J?,4TIDVAa-j:ϊ4p^hPyXZX׌a>I}+碀V)R`7.p>j>y⪑>m\ImO&N)(G'UJ8{ʧd5WZ }RBO5WVJ uTєVZ*_:*[*%Qy ꆕEXȨň2-+:i_*%l^y,%ث40حz6n3ړRbm98sJSf[N(<R" -Wd;"h!|h^Nۃ^~o H|@Ġ?̲ƢeH)8MAEA7kpt[!ZFQZ+>?|H>𶏶S o㌈EwJnSb99;kvMO |w@nH+ v*qGl ζ`gmmF:1,Am"PXڇكUxX퀇TRüBܡ#R[Y>oNYr0!1ukǯ✲-|௨&ňk C[!s_[BQ"eވ|P,<Wv`WN)C31ufۘ;TDWat6l/ǃKۦAc1:D8hv7'tG(U)'Tr2g,R+|X jC4r|ldIP_/H\(VXWaaL<ܲ*>vK܇cu+6cQ:HUcbDVdp)]At8%[~Jjo-/5V[pE'XƆ-?‹XV.2)`猨D(JpMtdKr KZ |MņT){eB N*tWt=VC8yTZ;%9eSl@(Z& \jeבxVԊh.ǚN燮ycZMكKBa8 ޷p6DћChnX{M2q?cFm_v>ܯ;mT4O8fgwk\Eƅ~c.Fa|Q02j|Ϥѯ unc?fK|=:]U32W/^2 fQkUx. 6mh0}yǴ/=E@6a첮qy5^fuϝ{}iGyg W0WaVv-Q& e"yf+B-4 yn d |zq&kLDEO1s~vOw&_{3nֶW|hpk4>54k6ݓ|=-pN5wܧA~M\ɼt(*%z.!lg) nGx Lc!W'{) _a9iϭO}5B@=ƀ7s<7c۝ "}HQdz_WgOޫ~<[~]ϣq(ѳNiV:k0/1.=+4QY Km(0x6k_^V4Q(>5oO`lsun-:nEJkW|^.LP٘Ƌ/aLr#""#99VfZj‚b{xzN"y]'p°8aH3Gч`&<m_EmD.j!7ANEd^o<""$y8g81>fX޾}[(Dp̐*E4vVG#gԦ `A 2c[-aY'P:9QZ&L0߃dpEҁf CT Qdtd 1V#H/m[mru Q)oro""JjqH8u9yty/5vʬ$5q,DA>0e80 8z䉈D01AtP[caopRgQ|to"F?}0v Ud"2,Jh>s5ǒ;?숼ۄc0reGbTy1b۰486>{07?iwz 5AR>I G3DPwտH{/^WWRYGO<։B${i<#)$R"0x7~M)*f,ī+nVPjOa:).풯?Pzi/_[%X֨~sH-\ ib99-/ξ1S&+/=gO4jB1,C,LI2G1҇`\l5S녧gu#|kxcaM"HN f s Hd^C֢@0#Qdh)Z'ZkM[ >a y8x) '."Q[xPF}s{ >]>}RwʪFq*I ց[ˤ!P9w s̥Wස2U–H>B ;=9C3 #i೘  U bQyc =Ҁu7iI!Z@߹N&L7BJ!ws*~Un)׻[M< ,L 2q:XLBYfAX@+9 JR`6aЊ6BNcYct/gS4WW9ƕmS lUAራ79)tQxX'JB^1'x䘦jmd)i)sYO}} 0іF!0: j3PlF+5rgnI HFRaT;iHlP܍QsfazEZ)0'Of΋ũ䓧fW˃r$^8TS"oL$"l0޴i;8mDDp6(oq7MG / ERi[׍|,WzVpS;f4k]͏ S2L'O}uAQ^KZ٭1IFB7ʅN]T(!&SW7UsM}[d)=UTcT '% քIeUSY,%+EDW544ͅFs=|eUP`xB&(tlA6\$\q'jhYP+WBZ#D#BA QdeՙPU:a9p^O]vkү|Ft;K ~K ĦgFjM.` Eo!ud+f:jc֧Y3fHHgI<(-*}>NcRFwQ_LH3 Ub}sBiJ,?l kǩVby"| v*4l{iXkL^O&?N4c'-Vw'Ai@-e8I=ZOƹMkk=O$2:=15|T_mKtpHwvr\ǒi멙t*s&Cfz ަ4?WȇmU=ZG))8YW }W>q"H/RtE9`9Q8qK}>(2XH6AHlU}E>nUXQԶ: gAS~{&6CHJ"T}[涸WH)Zen~ 1DP!b,;/UT0 9',8BSCC"1#6h,̼jдe 7NBnI:*85g Ɠ0vsB*s6 aK(>M ?{ƍ_ؑ~.Moؠvl _ZGvH\~%[I3GШq[4Esx^$/IQz@z(_CN1EāSX6<c%5+ 73₦͐S^6:P@8.-rsłL(9LD+ Sb\P <#(&\(6cMNjAVChg;2Qe cW&b-, Z*Jx Tc #u]rìR/K5["/kaQ`z" k-xZb60_rGa{$vC-q.LW,AJs!Ob]EN$qr rժg)YA(7CbrN5G! 
1 $ĕ81B&2|!3qR+s6dc"+FG^p%c+ M`|:~D @^h P>ۓ+N X Ĥ1 $5M:.X֩ҊkN-E:"M@DWD۹ɅL(\v1[p(G!RAJ|B"#p:VsD4L-oklK Y BՅ=v-|Q0U+T~¦e9]i-: LR]i\ʿq)Ew%HMV'7.5lNJQN%HlW^BFW\*>R*([p(1RQ:lxCkuuC]͇k M|C޵jW0fb(ҬP<{ " 39ɹGvKсp{1W2ͬ&\ Z N=W&kɑR+ *h )>`m 2 Dr!!]Mjf1:h0϶&T+al@w:<ߗ31)sncQB;;&QKG4Hld$Z7s:(HԺ TT$mr: t ɴ >z"Dѥu~[YQ%HJ-㩠HJ"iͿ5P'QLU=[F1i͛"eo7wC2aduxR3l=,Zu1eTtcuAӮ.t;hlynxBp]cÙHܞs&q2$MqJRF3T<ځrO4TP}=o<*wO5_>UdGo7M *W{~~вGFs6镌o; xyk.l%vO##N==22qZ\qH9Z| *yki\upWrئwN[1AРlہ#]moi蛅r`Qw6g`v%qofMf)ȳuOVW VZ&H&1 ЃL1on7@cȐKAIB4%3DlfK,&B\X cO{xPj}s?M:na7~fhBIQuKS4{ hI(‚HhֺzKp"E(\(?!LW>iwUaj cSBNęCgQ=C'8!v2VokC޺uk5I6[Y1RQ^=i+OMzy A,;-y_rDpg eƠAʶSLnI[N]O *{tj#̝iI^7jk9c5ajQEPo(1$g 5I7 is>*-XVºE\EoC>po83:tE6K(aju,cR1lB]>~y]笄[vf<5RܞkAF6q ?XT~SYpϓY;ՕfzIʲDd_RI`3%u^2٥kLR8/i$tf_r/`Lדa%8Z ]x_P^%=DYQ($ 5I^A-/c іʃUPCPl( ?Gc0*n8#dAOn&rB NCCԗԯ,VpxǂI7\N4AmknsRϮݘ)TRP}sOϜP>.eԄ(._ $L(U(F9lHBz8PD#"_]TZˇ'fg/ӌ+4,OwS9҄ 3m͘`r ݴD{rXvmzy,Ǻ3^[su\^L=0_>ۭ̾I5A,F71@ #D2̄X"t46{)q3%#b^ q|[fō˧_{-Reayi/mf0wQvB2H9cȉcH:ˍKǃZ >Bj"Fgwt{y99~&ȑ"uɖw϶;3~Mͱg%~Sd>)Ir!IkO|T!'r;IºbACTg58h[MU^żKu1^n}tРrݭB_7 Pa1hrϣtiy#c QzwޮUnVOsNkxWW"czo7G Zב+q$x3V%X`pg&1k;?;~̛1o~f<8JJ흍eDRI>D^~>?`3I vqh; y-3!E;reBSlUzHub#tjШ ᅗ{9OЅ`p cmOa@pѧ|aowMG|F+F;E't}<>l{/l{&>b3t!DS)ظRD|tKk1[ LL+ˣ[1b ̍j4OX9Z &R&{erj?nHN7z>v¡\8}5\ߏ^Ls9j{ ! n)wd yP6X&4wVqF0]r B8r˭3 )E2}_QEIDmQWwzU%S] :A9 pr+tVk)dG8t=U"$%3"dXicI"C1u.6N}x>C[.ivw9X$ӒqT+*h&%g 9suaYϽCLD)8\Z0cb"*D"tSɾ~wYo_}*5ogxJ-4b&!0hhS!i'Pݥm.)Q#ݠzALh\H62C.C(/<`iM@e޼AH*W#Vcn`:} \z?j{%42E (<!2"іR"WcMKOaW:ђ2Q3 \&Im@ gI51]"I\MJJS PR"a e]I^\JI% jAIZBI&;C<,rJ U%M)gh= }.bRAWu]Q@×AflgXvI%?^]Nr={up@g^,8y Vލ]c}4_ڧbW:0By)]} EU GRތXhṶXJ<&]jlnnj(w"#dv|{|eGiE<N8FdyTQ2xᷥCt9ƂOd&k*zmP^o1׷gNJ*g]H1@뻐UIRiA5HB-H^HB,~K0Qu*t;s&+E䒚bFng4ydI?J}REzd9˵ ى3"*@ܨP_`0 غ&&]0v]ʏi᭑]G -8 "DN5 \GfeȻroCiΑS[ 1ggǮqdjeYU^φwii^=Jd&`{ڹ8 bЛUgn(kJ4M_I'aABP>;=sk9e]P@$ H3ѝwFou6Z]*w(vrn;Z<-5)nje6Um.}B&O_x=>mx,y5BiW~^?֒j5 Ԯ-3"=p&#7]bpvaZJ_2R5XR>.sΌKn/fHlv5/ZeeNlӲ2XJ $l97"akn[HMs Wa6*߳g6؁QBYTl2'Ȓem=Q\tC]q tjԖ.+y:".˷ˬpn ٸݼ)qdM-F0ybz=vN(t+ۣ.`]A^dp,%;K: M̥zSA"%,S i)y( s8p +EшG *Y.D XS5ʌyv %7gCRe =hlJYGlO)30AT_nx؎<*Pa97{Ź>m8|vlP N)7׹΍?&ȥ,G%.?Tf/vijE)r-Υ oEkKrB.D+ybFAnp @yq.ɴ oI_H~ji%0C˓n,v~}5 վ}im(!q[sr!PLC7w Xyv#rSqb ŌYD"`XPLT0uˊ9iivpk HȕybZS$B kB [\Z<8-vnl}錟!~Ƶ5K,} 0bw)s~d3Dk\crq DHS'S+ey G0# `A떢*0g묧3_E݀leY&ڡۀiDXpC 8ќ:VH3$Z!,Pw)IVT:G;J[mNgpxlF s`bI"4i1`A"J\rłИE|Lc8zo= Nolw.t⳧[v2enOぇ_ϿqKQIݯߓWSBZA ]ᯠADDGN[? 뻫 GY,ajpw8(ъknnIn<wFϛq[ $R2F6j f~0VXx2E8` x U j}}A * _ș` jIm@Hطd~?|O!Vzσa/+ebXt{ ő&y`1 Mk$Gl h^&!TL/n 2De#_ mu펞A8M 'OF`Dw15w][MBo:} R4P=BӸ8`A9G"hj]O^rg_<1Dy1vAVЈ wFF1 =PBAwJu{QGƼGu: c q1mPzV mNm$a;w a;IckO|ž<:vσ8z!`,' 2#la7 [Kun_Z=d?83e glpoWX{;db5-N*̓H7μ:<'cG_QZs4Gw62pY66&M'Z/$87r+كs.CsQ E%/=g %#)yYHi(L# 0y9)mbk/y:J^ɒQZ vJ괞?oԦGL94X(BbnKxT xDGg;eСhw3_f/8<@EATG8ՊFQQ9p]Q8vؽ/4 ~%1L@\#]b)d^^܏f?OiPA 8M["f j,/ ķG](qO"yZ{Ckchy̶F9xµKHm&1oNǐo()ar j)7nZ!r{Zݴ ]OF-' ƶҷ_6N?՞8 RcgtJrtXrCmV3@3j8!i4m16XDrcGbbn ik .J |'/0W~t:𥿍, 1`ۉg NL87߷6f: 6p&ykB3$zT̄Gۭi5']Q%Cu/1g>4n\K9X1A4CtoxˡcZ4ue%N܄9BTTg\߽/ߕ.X6Bɺ~S hkm#GEbN;yJ23$3/sh#IN&%nbuMU P]|ȃ@kd_,uFz5WP}FԀXKdu`I!&Ud]a#J&hWjjR;[3:}ɏÚi> ֔r1:gMFSI1R7^13(Bp6w򁙎pt{W{նi;lEC>M\6q'7~tr6~l[kՅ7n^@eȰ}h? 
cq T}MDS ,[(-zk\PKWǙ4S;zHGfFN?WWIgAVAFןvxu3^)s+!qC\S2PmQ=ßS =J^{n1ëMߒ23ڋ$eX[3P a=p}@ƙZP]BL-#XJΡ'P'T59YfrJi_2gE&`-L]78zs;~Li:c>[wh+@WȫN4Lz0DI$%P#D+4Tw[:@J$వ>Гts )#p˺WU%f:7@4 m6;BF81pm.gr bGB{#1Eì0 ţr3DzH3 "EьC̐ J$b''# Ė<-'lDnYRUzwvǜ8?&6X4Z-b= s˻^9UO}sEit>k0*Snp aPZ<, ~"ko/i8Ζh4]Q !/?t_\/z;L&ڲH&TۃW?I.ӹߌ&*OzIRBw*uUدfѪWAR-hDP`Iil1j֦NyIKGɤFh)!usPv fѿt O*5x 7ޣ NDTDy4 RtP,iҼ v6Ʃ$IS!)YA%J ut׌L'NDX벿mkAz6T 톏|i836%6}'3}t9j+'kuWtU("pɥ%4Pi`u&X׽ Fq( ~_,@1+>f7xV;ٮrU4+O@[]@;TIWը%X%|7Lkew8׿fk^b+ ׊&}t({xTDzWcHbtl*xoyF! !vߢ6ВHEct7/`,p.p6oW]~x[V|_ -l-iϸuo6oÈsG FJ0TaB\ݹa>=o!}?.hҍA8czavmݲw|=Xh{5ߓɮ9Ԇ1eGxQ)Y[tLxG5G;xfb^yxF5us>\g FR+^?x*\7p`V~ v=9~hnqbJ%ܕ׼_i~5JR@''tB>}r&(WF%KRʤ"'scͥMT 򖕪lM=z~pMsZxu5S(uF#e2md8#lkIJ cъ*" ɇԀܤ:[gfTFAP8Nw% wx߼m`=!<#KHX[͵MTq~ci%(KfvN< z޷*f|=۷2]4A7'ďWj[Mf׿!r:~_cAU ;l30>X tvGT&vf)U9fw-T ;n&}_16:^i| $z .n<0cYV z(#51+e]5ct9fh t@}'֠Ttv\#2p> UYŒ3LBP ')Q[q_Ȧ\) ԛ ٴߐZ6Cz"m+U ,>S '}n QM5z!u҉ِWJt3{t{!W6LĈڪDPg$jbDЊ :!HYD9)8}V-_ezӆ4t`x5aQCYpn+PSԶPh,gC9)7%uP-iun/.Y(vqbE9~g|*KVУvzVL%ewC.3th!;suբ'풘ū޼SN~'*U\j+̘hraS;=7>pKqոϠRB᯾ ͳ]iD-qwsK59X?騺uT -/䠍<`A\$g1z^-{^ip"9 2*S+u\TkQ[BuY ^p":sS,JHdT09!B-7{klr=.VI9_-ـh֋ṪDb>SUje/9FV2xO{ZU=INzƼ Vn{p6n"@/wT9<ǖ= QzX# ȕj#8SA 2py0 IЏ䆎/6`et伽Mu<˅_h^,n݋/C@tY5j;UCμ l^i:[8LpfS8@=q)0-tC(;{;:Ep>8S(J48|)m ^e)]8#h:#k=c%>E1+M4D˭T(ae(-8u )aY(aN[%]mҶ:qM%=ىX|ߧ3tp:^Xw ) oV|D4Ʉ" A)P'tl܉6 hGq<&ށTRŅM1q\DUlwB*,ENQ*ép:yLw(b~\'A'-%WȌL $$kIGٔf#2M;O߮'_pi eIϣ]I\\F2Z,1I"$-)~Kj|{p'Jd0Fb0'KT7h W ,%oǹH^ئ2;9.T6$\$^ ٙKx7NR/b#cR;{|xpC"6ufH-pODsɆ UҔjfAԪKUOo$EZm$jN:Uh&!Y6iF u4$F6i.Tu4 H47mz9\tmMrI"5_$Rm S%\@;@Y*?{Wȍᗋ\ A: 0 7y[Xvd{(ri)ZTJiUb~< y-!9`YTAp >?>$O$K#me QAȲ:q|9B#eNFKSG]aII$.fo&nV8Pc!Aq.sq&T [n pXV1&aeE֕$jAe6^} SסLY9JV]{Pe.uT)(s~PtwA7TS„VtA5&͔yEqǘ+Kh)qtQݗ4nk0[k" H~^<-0EE5J)D|}N#AOY,/5N/.(.-n| =y2_ELVdns6r#2}7[6QNtdRh"`S:֬,|Lڻ+7J%CQi+d-s1zf&?X">ռ*ЙMxϘ'dw=f5CղcQğ3探 g_r ʧJ7 N[nW38=eǛKu!Ɩ^ ,6 78on`!;DcoJmM3ɯBՄí[djm$Mi1I3RYSE9o<-z ~3K3G`8WՈwS7bEx e\9i(ZGDIFU& ^hߑS)U:U sL”jܐ`ʧȜvOUF(%qTSwǠ28@/[,4D+TtPjd'8Dp=rxѻ,P24yks0z!QFH ^rt4 m/)@uK1 QtKB l_6OBdMnCodYE c=_LFPu&#ޫ8x51& 9j ׂ̭u~\!zs1VΉ 5WhPp"+"IђmUh`=̄.S4 z2Y٥g.q1(  .-ۍbpE>Eͫq.ROLQFі!"%ormO渤̍_RJ 0O8(JBZ(VM H(fй#iҗTmtՌ먡D\yݡKj4=9 k WD1*⸻uphV;7:.NUiaPp)l.DPR48u("G+4T:Ϊ"NkogZHHyqI'Sg#T]&IX282\,m-!dorn=.c.gK3I:Smgwը Y+qr<@"Tmm>doj{7Qy7aFMWK>fZ~FRhM 22RHtJ/6Y=clej)=>E{鷒+ *r8fgϳNv.%_=6j=:}DP H?!caor-F.I;>igHܑy9E8eʛi1}1<wr9S kO/ڇww_f;:?ФX}K& gp Ж=_go}z]ߋ؛'|8Ūen2sJa]/NGڔo@$ZCuL>{!.mƈ]°yB]|韗p05'UOc3Ș̆T+6\@m00ĹZxtpa`*cbTil9EUn+p8jpFv) 7k/0HacN'86ad)x0r+2tMXɔ"1M+_,I>)< :x:E,qFEVHޢ:!oojac{E`hyH>.myGm=z-EOizL,<eփo$j-{>M!d, 1)Z3M "&rm\l']]J ;@~ڨ"k3Ti莸Z'Qu|%O?DZ4bR3?ǯM79n4S"앳GOI{ο\a? 
9d,[{U.&;r^2BFpN?Ͱɡ3mgǺ k` F#u<AgvoWE~5/PÅ-byS2Ⱕ]-BBBiZ|>8^^Ov'Lw[= $ʚg )*v!'@kO5pG̼y'f43Ș(SMQ zL+[88PƺNq0`U,&g\H&D`R4U^AZySM ZT[*v-hH -,6r/j4X X":ĄMkXpҟ) Ax2&|bh DDb" `32P"Zqb ܐHr^b>Z i+QXSWTwՂjҜXXYкFg@j4Z\7](8Xz\f1#@'As%3_Я&JO߳0{~"\?4mwf˷W͵z~U* z~ku;hW26,4Stw2Jt#RGtSb( V)՜ǤkER%[m\YX,o2ndl*C O7+w^5AH KLA؉s" dT`Bz9Z' &I}n (%Ԏn-ZA>V2BRk2y4z oh寴7jAĩ hvpݹ5zL}v Rt)X{S IZ4uf8eMCh9U*Rκ쎟DANgƕ- [HG?gr|?NqU'DE|YvTd3ְ!KEX׼c̐K{E[{=~!|_p=](o  (7٬V烛//lMqcم\z|zp&ԣ&1m ~ z7 ba܆da?YWTU Ǡ7k cbG}ta?+&Tv}8Q^+t=RE^O@$)[5k QȽ#%e|$9 Ω@griA`L.]Qu -qB QSяݻwRe>ڙ ?ڛCZu۠qBm].Î;~qgqxwv*V\Ϯ@ T & #L%8F =LaKEoLgǭiK~ HVTj,jZ1)6ESwhv¢[;6VPߪR+CSXZijz&V%'Js9Ziy9tiOi"+s<\߱B ҿ^|OHS\3_\ۋ;)'4^^mv4wb (JPK=6dVR}/LZk&I=Q\S,浡o>fgHR򑙤"x5@D%Upxҁ-fLPʃ'0{30kIȜBQՆqټ!3 MZ `G*cYai!t*cK6Wb~ŭ Rۀ 0V"C%2[{ @k2dMۭ5BD oi nChVOJpc;L7fF1IJ[!#.@rrEp,рPJ+%\FeR̨ z5q 7!:Y7emC"ү%{feބ\LrrqNCfc&-52lAqqkW3ġ@6I/KF,C ih$AMV]iYTgj{Ǒ_ ˄E2@~Y vvp~ACNN{ߏD~QLY,5hL'G,=Y*zr y[8G.f?SLq6yCYፘ4%y#\vŶXBU;gmy~_R \1Uo4Wʯ/uFȇh S&Ve$gH%i+@(oQŵ]槩) 2$dg\BEut*bKYcw**| QsOw6#:;a M4.n!u)é&9]{k,3͛<А }2szRJAE7fRKͰ( Fn\|5_kFW.rW?I\էp曷Hvr˪v 8;mN#}Jx9Vj9jIpeAo6ԀNfuN%,!2$[e*!fg$8`T1ʝ^(¦AGԑ=k3^G;M`>ZjᝀBaNR _+oE.ZY厽Jz_G.rhd_M/7-_E6/c⿧ͮR7{;}XM-TWL j?{ut;|9Lr2fn8oe,|ߟU2URVg5rsW*WcC3̧h9ŰNn:b2uQEry-V`{xp =M9֍Ǹ LƮEzo f .1HVZWɪ${"f8iL+cj53G MJ0f&qX3J0lł mJ 8 ;!155naogSi%-HtHMҼmD\ %JXDEJ-Iw 혜[  iKI`Og݌S8Ad>/*v&0FpiѴWL ga輆U1J~ N"،;òV؛Z"k"-WjV89*ǿ S$ڱS{Y`߂H*5hOg~zIçbpy [NT2Ǝ5%^@o[Mnʞ$}:IQo#Έ'$+oˌae mB=9Dypb|n8YŗP:Rxn MB =!@p:4VkhKYb =#TX%-*ٰ\xn !K0};X"7u y_>Bc8JxA[`>ӊ>P< .hE窯[|[+M4Sҹ:8-i;p&4D֠ZF)2L Jsv a!pw*|T\p*#ePI;S }ۙ\s($ cMlbӒU[UgeW0a@y 0.q/*[uI,fo{drZwWt=ꟾ=qYޏGFr!ѧBS Z*@?K(̤1 M}޹]ID6̼Bݻ/(X܃h<Mn u~.0=eJ~I^7Lh"1)rQb4\.l3K&$_0&uiB@;Nx.o%yB$&&$V3\eiQ^k8ی''J%VhDeYᄧՉ$9bZ lQ`EpԆ3T%dᨰvgi%*]%J¦/jes,w,MhfD}wr(3k!qE^lVD 7Qk`*40=څ-H2"USNu[*8X|ChnvePwmm~9X\}yF.ZZKBQ&O5&93R`IJ8UU`0Ɛ!'7 k2O)<4U./:5&V_>X9h؞gXO~LnbGꖐxK|̏)̩mw?>/]dx +~ ;?[zփ:H'Nǡ@-.bHOEP関|{7Ep\7~ৈ4_h vՖXӝyh|g>\-xMWlZpTWp$ y o$MƗ] }ë)Xb{p'?fBJ~ͧ5$x鍒уޚ9X4`o8oMPrۿ[iqsaȸz1jDNc% ! II3V&ش4ZUߝD< '8~.hМIE ^(}rZQ/}~eNXoO/}Ix~)IԤf!$zÍTN/c8i>1j94=(_! N;߭t-sd}$@ɎNlBe.*ӆ&h1hZVdW|fnO2ؔvA,Z:h{]CG ڬ<:P؂,!jy)'ٲ$SvϽ'j}ܽx,-hy2S!׬94GI#kVkͺƒ$s}_*Qo`4sߖAOn.Ȼ~g4e"t'i3yZL;;wo NVI# ΊE3r!M2b10ȋRq7_X&Օx]@ߌ>nʍ$NlFR?٘YZt[?km tO;L1ej; 2K \'p+C/CvKd`WL֠#3Ĕ8VdFW٤mEt~`]Vy<&[{x?'0 ]}k m)D>`%=Һ!p7p'nb zwJkUځ"+"_}qsҧPQl;Wױ'` s\lOsݡ$0HH6$\(Ev;A`U*2X%Z?\pn`~m +ງH{k}~GAWʯ*Xr%ϋCt-kut5z_Ēwf՜VK\п{pT옄vg߾yM^wC-F7]ލbVZxhYjhQ18*?цP1JeQyt7?MM@rO|p0*GI JUFISYCLˠҚ[^p@6S,uYɔh : `DBҸ|p,{"zAhѕלU ]Mœ艎چ&YyI'L2fc=څCG \13t-8k 뤴4}?xl:Gh,*ǵ%k)Y:YB0`2YJ_:2 iX .lȚ3ȘkۀM,ǑTlk@v5l-ci[Q[FMDm. !P}T9%()NSE뉾9Bu)ƨbr$bJ 05Nm;PD߶/ZH١h5O+X5uYV^(Q@z 'z*jk )D $5"Xw%zA5$ &tmAiIs%2Agp,YJ8#R1HY^YJ%ӊut#2եT'e5%;ᓰCN5i1IJqcn3IcwH:l_KY 7K,M;0R? FZB; y: ^ƾUV>pӣx;=nwj_VHY>|=o.Se5o]>1wy\Wr53~ +ݮvFiw 0M ٸ]~gh~֒|3p7ӿo' 1Z~'hy)jŹU}3ؖ+C;GnMtfoRrKe7]v0/ֱ Qҷz=֞h΃ }ʪٟ7?-߹jJvz 2up/JR|W^zr /Ym[ "6 qӠEuQ\zTnS'.-RI$Uw4o+ (YwO!n6HHi.]8+VVOfZ~̀^~A&rk߀hӓm޳=4MNaN% aܰfdxy+㟌>\ܒ2po2ogwqKO ?I6y1yW-ypt6S~ৈtW=S IT*_퉹C6r{'>-3jXt6rͣ'9Zha:HnnqỤm&9vݲgS>"] ,;-I*! N4#b4"JQC$*  c8<"d4LE|669ƸXȿzVZn!]NIyŽ$<*e!pCkN7.yJ6Am,@'AЪO$,r 3ʙucq9(\Y\( -M xXmbOHxtIb.x2uJf4Hڶa^bI > $>;6JAm yrfCfuO6f ~˄;|R7)Tb1Vт$H*q 8uqΙAt[b)[C{JƳh#sc5a wl (mUƒJJއ m;7 Moī/$t4>t T6UCdr^ę,쪕W|@ljq'd;=׋g0Ơ!7ӳ.CFT" P!ub:Vs{$mI[̈;c_Px43wyH~7Zi;8H.Zcc@KeaZov^\Z4n嗂 *hE@ǀ[c2ɖÉiGIP:sRcm6@kG<7Fpm *)%p8Õ(JLv=YDzQ AsUNL*iZ:ʸFQ 6u N8kAEHIbAn˩3TrD艼G2C4Juk\U""1?2*b ȃmt^e2/%>,xB2uK~-m]jD@BLVgVZ0o8_wy6)5K[v񁯤r96#H{Ɇ+n]k0!f9jmK۸[+|JT2rBf}Dpm"ZRCH. 
#XL!y$kH }P#9o aLm"T,+ ɒ"VQDgGMTvٻFn%WgmV0d`AI6kFm9< ۷ؒVn#GV]X,ɯ, $hq$RB21gƩkQa$I Z=Ar4,zR9B`   Bᚒ:凾*#$tE*MQ@Op3ha`V0άt{;F7X$jg#j _)&=&7:{Gq A\U~p%qJzrI<,s#,qK ;b;}6 uHz0eTdf)%bE?kfG[[H-֭3[K \ JC{3P4Dw^;{otq"$&Nyf՟\^\Er|>,.Lq>MS'~g~Wf-`XP^EO q!Āzn1 8$iܟekuh4,Wh5[]0 <{ #b6o￘M>K? PÃ[dzT{v5q M& eh?,"J yYg~aP9tB1I'{`{}^> 8F=IwюnjXʮdWSm@S<[A66ຣe7xg-]8 a<ZTHge"5;8~\dG8e'Y^z|}uE%\R~QWCͫOn*xq {ۛR#}#B!4|Uy>Q>0anpmeaǏevco{4F763#5~_wGܕWrS95/{(EM#sbv%2W ~%/*(yރ@Lp̥ _XS7tja؛ ev⹿ʐc?{>юǃp+Q?:RF6LPKUjZKeNѭP1GVDV%S7% VEeE)'oҼTiv!sDTEyvy54o%Ϳ̫11z#|_mzi4 ]>VHwqu34L$m1Ȫ]eFI]pS/thՄKaCTD+Z_6sqMSdO+w̿A#wk4?d" JEiI#J ͒G̓vDդ*;uZZYv#l07?U q(H/6^yvo347C<E3,9_6Mj!0ǼVqǘR* VsTh#;Z pB6W4szx$d Z|[z 1NQb &Cߖ3!hq0Q6Î2^-a)b%TJꭡpQ Nm#Nʴ?d~KWvf3ַ{gIp~˵-Μ[b}j1n>ӃHӍ:*«7 ޷#FQ7dWyEqYb6XngqjXn_M4s^R ηHm dec5B$\![Rmdc )݀A[0U&}+:ַZ`ZbV~!LHm5xY(B9k|P*Ы@K7{r\E`梍Ecڅ}c4Takؕ}<ҺE)TT163GmBpa|ƶ Rp-L[UyQSTQ3$7F-rJ 9vʈ PujʥHZ$h!ӄF$ee6͘gӧ\A^_=;\\4\3/8ֽ۾s8t/8DB 5z(k=-HcrV{ITBqgneqɋ$D(?"A48ơn G5fSjMӻE[%~ ,NS+~\G֡*`k6mUΎ+gðDzSZe<9v9Zr #ʝWe^OH\b/ƌD>FX!YhAFGHaFh܎}\+YӬvdt˳t,jNhYT#>DyljQ8JMg> l}Z^tڡ*nm%:%9ȗ  OW6As-OKRe6&_Tua-j7&ii6mbu M ~#bC@OJ•ET tj1PړstGrޤ̩5,gNi 8X!"r->'55aL*~PR.SeQB.>r ƈ}V 2o6eaH2 m'8}9cDFWsEBmN0o`q $(uiSmWRҭ{Zy Zz٘>186Rڳ'@U0N ]slF and5(ul\g7UaĞh16bsz2_ꫩ,'<; O[J qymR"ؤ56&QNiX0Zdŭv"<W7wm(qP4:>0y%pf,ӱ*Q$lU;͉#jxty׳ۅ@5h ʹ%$'s؎ۜ]HQA82 #;#tu*8-8Q%=}~-B]MzЌ pdaZԔ.H^N{|[C3͗/Ds|6 uHz0iRJĊ)~XE=&g8B{8 q>ކ)]r}xQi0櫩!dmBtv8.'%bI;hS*7G)T||0Gbx3kj=SSʣ8r<5n,MM-xRŒs!h(TZ+rS<URELib#5#,DY (DB[׸e=YZe}B&2é]uqޕZ.6ШVJΫubiXM8]z $R=넅zrPȡvC"E5ԮFh.0-,>W˻) x9傎@As L{fB]`^rk^Y PVJŵd|u D Gɚt)].5Uf9YrUZ3na="{φP*hnM2*PXJ s z)Q!xΆ8p<##iNk~]AesZB=Qn I!0gK 5 ϖ>c||ǪRk68bvq*+Nu"?{WOwwvof?%ķgIPn~)zS9ϖ5/YG3W]0ޢVL\HS pqrhFW㲶&QTZ=cA[IT1ZЪR ~ , T*kH1@!$ŋXS>XkuM),ṩ1kXR Wy͘(AO)a cXP)& x~u!l9AxaA8!M@avo/EBpyTPp١f

HYQ =*\sJzb3'4Myuw9hi ?9ǟn?|c=+ ai#x<-BN|!ϟ|)iSy9hA7rP !Km&4nv8mi*ThWN@ggy @sĠ]8K 2 :KQz:Z)1۶Ǒw,Y! h$O7_<'[Ӭ!4}1 ] tA]YD>r퀚=jn пPUQF^H,$G3-PX<_}[Z3c^4/G*(Oszi,FܴJ=j™TIn8U"#tU ^ yaR Kz(Q BQL,h>6RCRZNJgK\ޞJR䌤R\3-"Ze>~R>*+A W"^BGEн}ypDF!9ղX[9ש+wqF}c&j5j)j)A pq; |a%rлiܕլLtax/Uemi#.hm͌AX9]"\Nxv''[&GTq|,I4Ѧf DNcB]PA Aـf@qhY"5a0nP9S:,ΨiPkiF߉(Q`rV (^oz$rs/f3dK{coq.rV) N0!ɪmg*R*ি9ƮvP(痭rEyV*DO=2v>ECtqj}y"~|DMgW,*0.҇OiD.Hhymy%6Btaji,_ϰ` Xx@,T|S =a"̯DHBu>~}UkLWeѝ]e3zv&v1V|r F:i?Vs:p$;ݞ'iM^S*9y,}368L0X%XF/%D-YO=x\t^Yy ׋WlJ.G\9`FGSynNƋC gU2MW[-Ha8qVㄴHQ5uz]kքo@wu3i6ޱ%TCq ԃz\Q=gIZOv_$0 iҫY BJ7YUrJ|Xrbb)&;T ۸򗛻On E7Wȅcxtu{S}f~؊3E+ g=^}L'Ř@V71QBha{z^#uωڏ/9bߟ].b5'D!kud׎@u 6cz() LWJ",!lyAagRD|wΤoCH T3j_)2:mŻgճ{ Oj4ݭKܥއgu&ևoBq4TjfUof٣Nl(}SpVWhM1F0G#]$ᅠl;~(`$6]}CXX9~IBhL|H~}?9!v&ؙ%3 VMӇ7f޸DlY^]_b했XL-i/`͎،WnlȘ?@択zDOGO ?Aa8*U]8B#m?OCcjZ #DιN*֔/T.<[r0tdW Bi:3BkyiQ0 /p 'DQ>ȸMw ?>2Խa]~FLpa?Q*Jm[%G}A|V I1%|hr/W?Ͳhmqp.qM }c4d\?IU_ ?pCɈt߸Mm Ⱥ],7s=ǚVbb4\׼ r!kc Lu_J5%0 ~C,~ASZ꫟/YcI3tr7y\wwfGLs @jP@Us#M!EjhД*=c>GGrs۪ӭ4; g\]_ؤmBJds%:  ^aݽPTIx!G,(Q^=Qkxq8E^w =YV}9k=ʅ/ue}Dj$D txt|SIN @,`ދ_2;P:ew\2Q[uD%NE/VA(uFX Y0ԑw GDMKMܬk ŵGĺw5LܸK|ӯC"Wonn?6@4VG˟+{}e:<ʵܾ`J2UώѨ2"Wt 3DG"6`(\$ EBz)LR=iNZ[HV 3'uuwĢRWRoH8SJ 7I 4v4DT؏J;]nB .&{F)11cetIXȐ& `\;L ZdjMxprs ȑ t pmxK0 DJ;Y,i)h ݑi'5h!ޝ0P]HI oL9@%+1MU` ۱ǺdL7TMtm rʗ :*@eƎJ\̔hQ9nhۏj#1Ǽ^!# O&ծC&!w^߻]񒌤7q>ߧԨVw{ߎ<,ux~sdd?E#mRe|m@~ssrtua2p/OjT[PVL3U<:ڹb3 cI'.=j5ˊj bԉ"`c#wOZx2LД.dŽBUR儂`?fY'u2''Jبgזm2R5#bLVHKЅZϡiyu8f4,6! O8_Rtpa  Y7[3YVUY-遖(b OhU-¼#rli:T o EEz;IH7JCnme:X?]io+?-<6@>MܢhYnQ418jE8F%gd9&9YD&[濓ȧ4QyW 5ǫAW"<ΠaSgM6w̎9,g}u;XŪ }Fa?j ;GBVț Z89@5q`ܔasd|Yhms``DR'zUv~#` %U Ar\= sq0f3[w?!tR 0˙|Ɣ~rA@k!(d*|֑Rէ_X, dU5;6N3C8{ejִ}uV‹yx8.v^fe=s |`] <]]i ;Jy5T.u[wJk`Am#4mokha" gz2*g@'Լ&[j6sm|Z>(hnsn..mT+XoIۭiqH,}Nero˜ Tk;|^:T6^3ĉu X}-׊ nVpH:5:t ,!^dV XuJ3][\k[:7l\?x°?b?sl11vph(` QQ¸2BXBM1HuG 1/wxXiM.yq{ji/iYPTZ T'2F<1"9!ID!!"\6V>H&`uV +fFBCA(Zb Jl?MH4,3GgӷR-FO=ĨB᛺M|"|Ccn3&Ѵ0TfdʈzQlچY !N(JR(FSf)n{u;!b6#9ʦCaQWXz )kl)dW.Fq:uvf%V?̦x:Jf4T`dwof`ݶXjpӍzb7u0z,=c~{v4SO郲}dۗp蓛=pH8]:,iPzߗk=qXW$!\DKoN^nfV)Gu9n<[EtILS(pR-y,_`!6h0|I18}|':6DLL/].Fl Eu"{[ - mDrX<S _e 8׭՗g\Hk;3yCh0*ߊ-jb뗼mxKմBp[ƭټ[sk0M9Yj}qcO ;yqAPsoŗu \OFUo98*1?(yl~$FiTGmJN|PԊ14j Uib!: H"t, eܺ]C-%dq"(@q(QHm; !4hKAE ∄'Q$ 0.a'R' $QBREbA"L"`M|P+ 9 a^C 0صYsI_V>L0^(]FF٫f1Z ٳh\s4I oza?֩1L $MiX)8S yӽTXGO_{;Y'} w_,Ȋ+g6l _ebpv0$nTCHR9}0asTrfF7 w*'=H14D8Y`JAG6$6)/e y?z`3d lJGߎbFOE@&< ,`c=6E@gYSjȹ.;Ri`ji\ʝ#3=·7[x&="-\q!T3_ũ8~Ut A;*]Ѵ8ՔV.}Rj9k2Ղ#,>e S\pڞAUH#R2y?YIb#:eȣweyNVzڭ y"ZJone1}n{lnUH#22ں^nVLb#:eȣxF") 1VSU!!\D ]4]zI]IXG$ T< "0{xH Sa_'eGNצ9H0cC@kop$BY|9Nkv ׎tai/зej-L/iܽ7bLTh?#lRWT9k.qAͮ}b-Ԗ_- ]kB7NM4=藡uG}71Z\^]04, -w`<}'әL *bT*Q$#t[(/aKX>a=l7-x!PēTr)t% U~,nO:^7ãh1li6-n6kY\[CB8qJ`^|G5wL"q\F9iCpXO$@bR 8; Jּ˯qTPv[ab3gla ' 4&2LXa'+0bBb(O!,#ȩ@YD BeX>(*Ϩ^ǻ³$MHͳ\r0f2PjDĜ(It9!%1Hq%18bh%& h^W֬$ILWjL %:ɝx(PdEiD&TҷV&Fu{_Wj9O) T1S"JXbě1 dbJz@v Qs-\0KΡb6kw>[II$<%\F~hH<pyC`HB,cik$p'8̘KXQ2ah(Lb[9d` i4:[np. 
Z'cCU`yBݩbmRM{mFv[;Y>>0؁m*QV:G3ZxJ=)ybl3U{ } zm^l tҏiW&Ν1dx75|u:Igr&Kq'8Mî36˳ApڃV;W7i< (-#<uN:hw}UX/)$siO*3u q9I>T8;Q8bt=>;PGTMiWӇ?Oe:Wl(yjpt?'ϯ^\qy>b鞤?ϟ_\˗o߼}}jF{_W]]<7+~Zgk3Ҷfyיh{7zi7,BH;/ t4Np)/ݻ$I>϶M6 uzKz˯CHʸsgd4 EZhO*Sh/s'v"nJGa>T֝q:~ꜙOəQf+e/醺w6o95Of#ZK#X}u;< ;)gˉļ ~ַglLCۥՠQw%/T=v$iu҃0:ΤcO('̨/}0y5ѽqNˡg1~g<0_S,8Ƙן:}\pqʨ7wckGjxD}bfs[3!t]u0n=/>)C `Na BnzR'P<3 F  8N0W`>|^}5_ ȋ4F8ܪy }r&jՀy4 dlf0I$  P VyWH\ٷ$>Z 9MhjzM6bKj9v+e7Z{,jqQl'[4E@}˙;šN}z9z{5@6g ]9Y(1bdIcoKƾ1d(&eգȤl- OIYx=)֍+d4?))nVLeCcˤ\iIBCÓ"{Ry_{<Ry]Z7]LGL%׉ !Ioʻ%0͟և an1}ؽn*_#[U9 nf{ƍ岔~Jruuٳ+Nv$CV-XJM~ @rH̚I%!F&}l0v-A"|kD@A#n춉q}Xk(|vҬVnR1wX!14Y) C5ҐJѕpc1/$*SEZ:aIޕ#GU #f")H3Eh23ʒ{.8p\Do˚i0Pf׌)BYͬsQ(m 7ıBz&PjEP$Rک5TDݧ07F:!2-au[a-pFaM _ˉ( p,.H5$Ę({@w"7VxeQJ 8 5j֪/ ?.)dK40\csAy1ط-;TR)YK2,ߢAn~}NdAvzödɆAFT)&CT]w3"}J>ˆ5h/z1]t_N5_.Ce/j;/#܏|o\s0 8#?{s:$L,> ^ssq:@o37g`A>Lg ;!8WG7b,9SLUm9`q'9HhT[-&V=GYx ZԁOjPA(Lw «Ewgu+z=L\cL\3@RĕfH^%Մ"ѭv W -1`t6! *@Hwеp5X.pS;ft缬&#SrMaעul% /W"aj %{ULhN6e4 >9ҵ#J E{ٚ )fh@I4ܻB!n<7tV G$g0%㰦Jbf "TS̭pW~[ :Mj|%B5adNJR ) .}ѶD4 ay=C@ GLR5hLH*wlKC]!w z8PNXiFr8P%k40I-)ZScGGrht4(j^2*HZQΎ 튓wV 2`WaX] Qg讵)w4ib2Nq~5RLr]1xqnBUVt5wYMQ -5=1~x]ϾC=q -Ihq -WHĚvDE$bX^Pz!:a:ۮC_^2yYC[4jn=kzysn(jL>~;jdSvx=}޹p=Er Z<xsV}th٫5o_}Ov7HxEjؗqz[ZY d(bHC0;=hX:9lh.!  |z8Քh+9K*EwS)4'@iV߭SMqktĕofӑ-k5B9,eCew7>seH_1zy8 $H}ǂ0ap{_Yoˁϔ]pP'`]cʷ:?"1.H[/yf+t2IW.I2E۰XT-:&x*SUg-=v!!_&TT+M(-:&x7%:tvKȮrHW.I2HdS1R1#:hݎx%'3햞DQS吐\Dd4Պ&JPQh'\ k>(9;~YWhp\U  JEɘ'?'X q qޓp2$'?'d] X aғp*9 '?!J#'h!O8 YWPz~Fw%ĉjCHq%+e/]&ifkr #'GrH]Px MP+=,,0e"*'+d}m[4۠IFMq[\-ښvl|CJoVGr1,u1L5v V߆@卻Y4hG fލoBE2aǏ\"_<Bngi!49GPhޕԌ10; RW"J]qj=FcU-zˆ%/h`}Z3`y(&9K8йT0)J˔ mD.W&@Y?vK)r\8fl NӍ\^' *{|ӛb11xp YAίU z#$u9tH~+>}p˲(q+?~}XLcCT֡C6d ]ǝf軇/1(g\xa|!B5|39?<,p,4VHfqip,:Ԃd<%@$C6lɁk%~|dtԓEO6!U _G3ˋs*ԯ7q /OM7|q~(sW^{SC*e*)ZEs*:'$W{_B0lq!1XR(XYmƲ6l΃Q,F(UgyvhwP ]ShZh[q:4|ENFF \ zg"q;z#RbjAgD;lӸÚ[kE?~"N{aaU_w(DmzR;P߫E+Q4X^xz2Uk{r%Vm7BH[r;kvn}u Հ*A"PnTOvbװ*My<9EvXO& T2|:z؂6a~RTr@TvtE8M8us ߋ4rwP55Uxi |:6q3ퟌ' u$2He,Q`7KR*CҚYKl%2a_)WcNH 85ѻP>b^b%gףe'Z ,H1֋H\,mdn_.Tt.'"ؿ"~tpin5x_>]xx5._-_*̫:#Z,aryAwM铗J+ k^ZYaKTً?%f"gzN7L gi aeJ,BT%R-( AdJ{ɑ$ T=(V2ɒZme*DE a1P ЈEi*ₗFi Rᒡ(ئȳhsRZp4. 섀@ހZڳI$,qTxt[GZ&Z5'Wָux3vQ\M$~f/Uדɪ4XCa^s]ݴ?0]ҎA1+d)+.vz#x9"ddۼWXX X B,uHm!~c8eŸ僥Hrbd>]P5f|XO m4>*1Etj/0d@Qc+5E6Q" 2]EtRHے )+ArD0X1ccDsn%.nY/չO߸Pϟ#>}~d2F(·xo'P*3#<0Eޜ?LgK\T}'\on.Xs1EZjy6%cr[KX#>HFpWVXEHKݰ@ל>L=aeNJנꬢZ'wf)rHTBVLZoFI77~WFX[?J[c)"5'wWyE^ k@UJYK-@7^)f c96d:6!/ceZCy~r-3h>CgZ1#Tn$W$bB-vۜ=f2yX)X>8OҲcT/sSO(e>r)[ ;6(ش# ]wcZ {V'Ki+QP|ڋh be ʬḁUV9!CNlƳ5..:5â|np楗]hYTiaTuHfវw8TI̎UL2N㻿Ln5DVΈ|N$ 3KE}Vk%`!/QCYJ$w=DDJmJc0QtK$)SmP -e Ɓ`j6v G'A!l_z*vLcFú&c$U1fN8:WVn8Bxqa-Tđ%q1vŘ]搊bdMTĭ'Z.r8t&HeϷkxkqi;XXڬe~-άf XkLD/Z?{׶ɍdElɼFɗ`wdžǻO5PK=^̿/J-"LVW Z< 951速LL4 ҚrN9:w@;*7d:է69J4s(uNU KJ9V!DOh@22ڧ @ _rڞ+w)K/_}M5PTi[њ>I1\ԏo2*IGԺ막W?$ pZ@;|,]SF`d]/ lavA;=GKҍېvqn]|HWA`ƦHv@¡ ֪9JV1*$XIL.0x@ҏAJqA3L%2{'PAu?'(:ER|e< uKIJuwRju%STl])})6tTJTJ-(RoT9!JrtXm7Lԣ. \h=fbi'=9̭dY9yo[Ah5qw?W9_xFӐKMv31 _l:=_f4l ; FDZ3!޲"Ȱ%,prlZ"xpyhDYW/CV6x==^Td3'fĜ\Ql:5-U۞E9sr %s֘%Ҥ/^_ $jb6'PE?{N[JkqKIbxp30t 7NK6wE@&xaa|& KLz*SOtObt[*q6_3t\ZbCGɺ> T}|U=9;! 
/M<*tSўDMDY&RKWaN!5~cV |ᩫ/-w.n֟Vx5,>PZ"m^xC 6="#2*'ˣ`F#Q3ńF^}-AOgdZ~<0{|0Gl= i*!$Sv 3"ez$(#>0AUE]>-hJYahE]l5|7v0i߷ :c aryTQI#:d"83 sE" 4t2 ~W L`<;5ٰ0MUi*ͻM/MfYG:77\MWᥞ/wv:[/}w]{uO1M"u?:v/>ceyנ/pUo\59G~]y8}2Mw$gmOeX\gYA /Wѳ nHz]8r!: !ݺS;MddgeSІfg"Ct-XfLti^)0Yf2[)z;vG *\k{U q!@ѠoqFOefgl j=R>Ѭ 4dp .z%^KM"9hnYuqCkvTlH S?ą@P :29szGernÎ2VL`7-ߔy%KʷPfy$|¡O~嚗$O{n,?x*V</!Bl܎9dsKnRЌz`&dk耗S*sqJB*/@(vvES.m*Gā7$tD˷ꊶ#rylk{+׀ᮀ Bgq3[(>M@V"yĶ,Y( B'{}?|l%y^ Hx-ʹ#oQ/0E?*$T9goCPxo%5XLjrtDIޱ>@_)kHY?˧qA@B+ EEu Dz>J ){$NXF+-c< 䁩PԖxdy MD@9XE \1OHd,0SFצo6lҒn\8m[~ohHW4+4^`M7 Jz 2B[l I9|Rnv=4O.[Mct3wĤ|znfH;;4m#6B9]Zrs>t]woӘ%ve %p2BS&M_b)z{f]ίw(W5!m6ݨ(6?oTa5i ͕6#BhdDT`BzE0y4KAza)CJm 8 > ᣾wrD0bO5$d֑XybUB|">qSI l=L4+89穉_/R⧇edBQlx{\rRܵc9ʹ<ԡw '4ZbFg2Qsvr Wi*( c|xR^84!1եb""qzLՃFcekJ5T>U]iвcRm\PbCxGpt/g}Ǐvqn]|Zk rƤfClG!vHqՑi4)!uX{OgcWbIKXV5ٻT뇳Wgkp&j4ϓ_|1x-sհz  }j:ֺ Z#*ƈI [hVc=x9l4b1-(OSd .t&gh YI@t>G z5Yew㑢w2ë1 ˦F^/JÁjleP B+-06,&ޝܿc,ېt8P9STf-0& z멹gZҳۋ]u{TLm-bʈ 5EL'587pvE9&zdV<$"_=uHF?;Z?VwӅ_sy>Qjz z=BWIOebe kbU|vY.Ӄ_ꮫWFS{hx˶_]݁xɝQA%hT[A}W&TBPEzfܿJ+>M07'4ΔWy_Gd% ҖrΤY{ Ӷ7K~:X7{5yYIry˟1T[*WM~y3|w{o߷tobc7@QZk1%I2w-&C5uqck1ZI6{-fuuun7Z s[?|/OP?=vC] d=U4kҖF3sdF>BVctYBA (B0! \gt.?i}u{Rpo67`~\I&1*(b|YSƠRbfҪߛ+nu,on.ޥY\vr鏭5$@xhzVҌ4Ce1=(V4ZkD#d0qˀ!( "SU( ~vq}nXޮܺ'wmIfA lXq!HyhIJ^oCIlCqs )OUՒ# E{1^YdZ|}s;SM',#g?./5TVm^j:^D1^efE]-k.7C;wtsK݇Otڿ ^meIҋbs[2NӢd2Ƒ :T%Nm3mp?})bIĮokN3D qJBق͜PM7e7w|`@Ltǫ>јZRL5E VE2+mg֝b'y2G1oR HE &BI8g16,V<#d1[-_,p.lc?,G hFΕqc@]\T׉7s!s1RDe^Gst*I=USF/}6e[P&ԏ>\8:XkS(qlB{xro# m^֓AUu'3FдbT|1ʄJPcWmN(&9yԞH ggQXjsϑ@HB+J2p#B-[rprS?7jUz:ϧ/?lǚ|-jCMB,v6,[Ά1/r7RPlcYQu. +FfG%bxȫ9 BTcAE%R+AVwT) :NJFeU!H &j YvNf_;M[oztH"mцcӝt\aj#\1BIg:QiZYAׂ`~ڣ.96"6 D:`3i 2"ќokx׹QU Y@b60,+ *1*0xR Au/fB=H}+'i^v}=*^!^gq$UaN/ E[1zd3!0^%t`hgsGJ'\ INIP&s<)AU)SvG \#>UWWTֶ'Bc%c@,m-X]  B~/)^V"]#Oֽ *r,4,1BL 2,C*(5 =yfTbyV5d)I8kB$$ߞ\8 y޹*J;W O+T6^ əSlAB=BN㻅 ]~c@;xc 7_\e{`]ppx{?yQ~lcOUWr<,ؔH^GԘz9xZpՙ"j9m]ɽz N -=hj?`]G?%ְpsȪ8(Z%5J8=4Txem%ԣJSZщx4=IdX))!KHCPݕҾ;Dkm= 9 KkXyRݙRRK[&Q.lc_TT;ӯdLZ,Ta$a87#OQT1V_iik՗*GS:soofh,w-H4z5Ţ]ޑ#'\ >l6>:KT);G#Ow\a?yHqSG_h!X e .ƵlR.R+Qs 0CYHbF:Ss'6U4w&b$]Z?E_4sM{a5Z82KkEe^n+/afq\z4> sX._"Ҋڐokb}Vt-Hd^0| J˻4(ZSKd9v[֠\!6\, OB|h:?y?~ÌϾ}?%<帵HzQxqn-n[Lm=5I=v^eӂ8F8U.c~[I$/ B#~F pQ붊w~Mkkj6F>q=Cqn8NCǫzѳv4{N?..-<*C'}'gƣ3pjBNa@A וJ'L<홍qʼn( 1NB#)ubb,~mc) &(QХ, ۣ(CTn(pX{y=FBaBi7mGs :C|6!BiiDZxVz=K}RIBMB  mdBn X 7!  H bKP-BKd=)іd<ڸ1ɠik: yx-o[amiRG& a& Q@se$F8!7Ȳ.װT+Pmlզ&JUIR;3Q/FKI58diBBL`CLr RQdvZ 1_-4^U5λN6zpbHK+^Hj;iKz ^vc~zKcd)_bXڮ4][a8 om0^,/3Бv%/:̛$@hvVl~3TM"'}0 >|^8oGC7hs`bS_'^ y<)N: ھ-M|K A6F~+6PYMV0-뭊'D§'|Q28`X Agj}nNuӪPZEE [Rwwjv$ҏϓ5O0* Zy) ~0ϯ8L8rI`A_̟wRcDT)U`Ua]Æ_WxTޢXϽ\`pf6r܆Y7nPGt7 әMsuq]g:B EhJ% ZKN8BB7:hjc[(H|h#|6 M|[?,~|9O ˅xOkE-pA E8K,dCR BXf!B?}sk UZڽA#=V(<%^#HuY )K#3gf"=p蝎IF?VyI1B@O1c%Au=) ֘HnJzN=։$LdI⁙@bIEDцJ4$=O-W)iǭKvW~ZO]i+?mߕ> 84ƵxQ-gϋBbY9ml ӓx0ntECB4~z` &5r!k?|g)qz7F&6oﮮg=aw;Wn~ͭ;K~B&ʚGe-*'^ .I6~A`Fjkqw U)eO8@vhqQrUU,R6MiI{RXP蘃RT:zEґF2,,*Y@;;v&fŒb*JxvU8 )f*IzAHBALt&'yL$oHK0-!3Q Yڴr)!Z V֜! o߷;|͙vQ.JVE;+]sFY 9D&yԙ'(4IcL0NJ1(ɧ*hdTל^5g|J5ܞV^!r-DzAE$j5*>*77.{IzױBGw٫Oե|2*ׯ>y F'escdni 9BV)Ɂi@MIe{Ҫ,(\w:׎pJGЧo&߯JS%͜y_uò͚[K;\ a0FiU kU؋!ؤ\&b0XXTj'2h  62aLꇡe LF<Б>ГG1-$#&! 
:!gdizZOiL/yͽɖ U"_M(9Mm>H:9٦AԖ[i/T1C4,M" m2&k~m ״ Ixܒ$elf:M$ôiPAj-\c+%ɞBz./@YISVѤBMړ2͎r&3QjcCqH}b$C;>nxZԡEY²S;3(ZqQdQ W1>[<*ϳ!C~kl7w[E#e͞ miBݭiAѮ=9 7o_b遤Y.XDG6k ؓ:>Jxϝ$4ȱG~v3͜^K 'N l3<[L}`R!D)+$ Ȭd*]Ohf{dR]QBf+'UDmL 9Dș%B19H7'd[$/iH 0N}ƛߪph-\o䀁lݬ+~7)9/)dn-aC$S`A9}AgĹa6wUX7Fֆ\r!֣x.%kR t 6zhӜ>\J@Aђov0u`,{m2I\l2T|t)3uYƸA̬ҌkFv1%cF9CU[iVћDI 1Om) EtYwCΠ$.g+ufMo4"U)L6a } qAД"dfY[!(4)9a!7 rHCv{x7 J!%CВY5Ke1,+ UވbF Ь9_70hx7y3(6#5bB ⦽,on+mʧ&+Mk-5\l !mtAA\^ 0~1ItBKqRh Bk 5j,¸e$>^rV/G=3sznɁw*Q"GKJv&\ۍO~}rS?[fǿBJ="]KTBba{un^_p Ő h;bh@tf0c~҈adS |Kr7]m6_L nu?LafY,th=?f]|l-SVλgǗ[ Z{8;5ڶRR}8>%uGpT%8\C){v N4[ '5!G`BKԋS9qqL)k,*Ij= Z Quf7]hQxs Rt:UgDF9ܫEiR+P|\,R|!~+;9 d `Gt7j*`|wI Y@ EɕVO?}?}[ Ԅj7_# >](^6~J?%ZkW[ڛoOVR3h4Jt!}X"I۽yωmU:[GƖ[K}xcϛfbS'4?}^:.$m% vɖkSh9Tp*90ni( JcF`SB&4Ncb Xi_6DI'i\YdA"h[Ž>`>HjK8j%>vZ..W5#lraږdTx˽* ;o/r?^9R\{F[857`EnA+2$c>p%#xU:g1xLI0aO 0RsFe>:y.x]Rf4[M IY:iw=Sk}h;J]}Êu㚙"?U{cCxh{EcEC"k[+ S[׫=,NvIsh:5hAf>pkos9у~Z4 ooj-?Mdn s3~ <_ [Zsl&М4qyOi~wuKswC]~n'H> g祢(Ql#?n'BK,і< 7WmϋӀ|~OxfM/unB}X[2a[ןq.o3aH|W5{d5_n=^ de Ϗ䗒z\v|;7ܻ<~o8f Oa ˨G {+߇*!b QCksKfuWuek+-VrW]o^VV>yl<|B1o Ϭ{$J խفW + uq0oin{g'ԭ_.0x1Ľ'!vǰeI;}{{% |P8Ѻe\hv?M9g~ߜħ4oͤ?.N6'vL> Rɾ o*Xy>Fٓgÿ'D쟗-{NFۍ#0i[fB' MʜEWmѩvx*'Q8.J:B:Άp 9*?>Tw|X3(*wԎz;^Zq2FGԻ [?{{}, -8bȍ0Gz0U? yj;n?.mk?_uߖl|҃ \g6Vӷ,e_>ڿ:fyap6ǵ(wje8pznIAZbvuuCA߲n o\r#e{`W'8!Wlh-vZVwWEVqzzzuc:Y~,)Ӈ>pEÄ K&h4Q<8pł |Lye!CfQ^:fy慓ܐJp JhRϕ鷃@7p fC?W߰tZx&aK4B l.쯬]U̒JWaqrd3-Y5J; NA*j_EQoV,U.? zw_SW>]j8)+ ֎#,uJc GPglmiq <ǯ B("bEO𝌊 ΀UXS;Cd\TER=9_+ա ќq.&<{gO!i[X{]DWS-Acوɳ :GP1ZZFu/Ju Pk[و7,& CS \hI'ғFV:-< hՊsZQ:rv͞9k;:_ Ι_ۡ˶:}mN1ZdwP.6ͣX~-5OwWaV^;Xn"fw/VX9uk0]_}zŇÍ_nL.b  -^o>cUfih&L[dx.6̭f6Ǒ^BnjΥTO} DK&؎,䅛hM%uLd=pt+ tJft;6Ǻ[MȦ3Aj:W?eO{ "ѫOx N8} '("uEy,- c<}O(Uj|\7sig_m[5w3F 9MpU9߻̺r~s%OM̨l)DrfUxv_v[hTk"@UmpTynHL[ ~ZPR6&6tldhxuE䃆-Kہ#˨Ol+:%^ (Y Y &a5 v_ 0;2E~Ȇd_{ףX8t?}6㲀aaan,QӴO#[T?bcfIs>w/~0p8 v<⎭Lo" u0{e^:ӣqQ' ?rV8?E'9N=HhWbpBRSuF ^n{|f֊=Gd-P̒eu%սcPOaQ,뿈z n(Im~7qO?W2UZ7q5ekez)3*SهțҦ NRM6'j*}ݻ܊H3\gZD}XP2:)_3͐R؀hfd8jp.Ȑ}ē gԸ+.9.5p&;5ry*'l֓uqnjRNR#[i`Vv&5r8uR[ Fqi!uCjd;Jp1?w!h4<с#jy#b,f!s~1Œo1/ 1"M0b^&{B1b^l7nr{uYpȯˣת'_B/)  *O_K&5&rw+ v^6g(F-꒏d_yf5DנW\-e]kЊ N:#PSrx%ԯAACuש\~D,6>?>xͥ"ijSDcGn;NKǜK[lSoRNzUOKM=oo|wO‹Hfֲfחo~177Sf7")U[*m1kClB'j7k >A!:b EdԨ*k'>LI]ɛuSɛO\%o*y>3XY%k`0Õ%'VFYcY9Vm\ɞ˯v3f0Y Q(,iYTpp"By `;2g SJ% hVb #(ŕ+jxp[\GFp$)Wo9‘9\hCw|R0G;X;4[[:`#mOk00t/lL=fP)nm6O8 !͆f ,pq=/.Lnh Y/aN]2$8[v8E-Fk=.E<:&sn-7(<t&Ԓ8)`+& )E(r25]1׭oKiB (ի$%P/K-ufqfʀv! w2__nMͮ/I7zb ~L~Bn9͌bXӅ~o¼~+6/F :eIHZ/;/ք0m'nJV4_Xj!(TG{ :E)R 6 sa3hJ!21Nu 4.;LF%ϹOs v}*3mjLC[Qq>s}3Ft/f 1=HX~, O(K#D:0׈Q9rIOO;eNBKZ 7٫; [53rĞx&s~ֈAxP|y $6bR$ŔRj4gP{킀"jC):9w\"慠8G$)m%VDU\#C+F]<8J , 1R@, #Jp2(:' U.*P0 Id0T`x}T$qc儢i$ U3fSoGcEV +n#1FHN$(j+%avt~ fNio0i30” Sh!.w#jGa 夁}BZh!2 S2sř3H?֟F_x} zzۺ7n/V#nŇa_fWo@%X^ORR71o1E(B8L-/o `>._+}/wT>3dNqtc{|Z!͌1bn5x6-m5c_coo  oXҮk5cz4^e&ֲȖaͅ=CɌ ert_oA]cJV LdT)Կ_k5|bV$=YNtWQo)Qw״ۉǷ˫Juђ!^ ٮ1g0N^c{V43cEHc9'Zy2˩N;#-KTcRR¤) = 뵫 7)i*x0eӾ[vM@ `t܋C `zs&eLIsd26u: f-^ѬuJŹ$GK1:"9jQYfs YKGsj$~CCCR+OrĠKEV(H&"P wmJ~ O8Ny^'ʲWYo%mKbwkV7YX"YUF(猀X7JPéVH TcKb_GǔDX\f&hCB޹TP0؍ %-"/ !E{&JiڠMVNS0F! €GqxL-kn[P$%"B:!_0_PxpJY=V\Z qQ)M$v #C8=q{tֶCs?,t2H PCq>p;SwpiJF.aay:0ܰm6(tN&֟ubjrK Pdk6=6h虅|(5 mI= awvGj!vj^,ыTC69 RNq>fu9!x#2֔ LK55 $8 *"-+g[%uUo+$;(M``^䦌^~LR)W*9汵/Codqdw3zBjŕ7:9QqcilkD"4?\lOd =87Mm[g3k/8MM t8P1D CR 8BX~V@;&%}?\֏ہb\bq-r6y3B Oٓi[s睌\ *"Nhͭ\hj|K zD1cz^7'&Ɏjih@&LS=JpTVǂJ5 H ;H'-BeA/>x];!XT*ʬM~e`tb~^jI/})~1h߯l߼$A &ZR Z EGX'/pswyAc: 38jM2Q@\~aO41hj$hOO)`a]Clmmk@"}G8bY?>e7v[, 8X`3S]u{+3UN 2S5B2TWV3 iv(԰zu_PL>[jߙu 5G!}񆽡>9RHd3&ϙs\n&[0O= ^hݬ 8((TY+-Fk&*4qZ +֜! 
{mKBV_wڏ{ɴ`ڰ#APJJ(ni3.0pIQ_g1;r/Sva*51Xv&퇓.B-ڡ!\P=ZO/KSoKXgq:1w,hu0XflcXaY}4=¯|<-u'uJ\s&z̐y:}ɲB rh(kY3JRveڲ8kpպK}6Hgg &-gk7RwqNm+l<5Jg+RjJkPڹ|^F\cg)'ӷÉ-NY'fgKh+ޜaѡ7;z:[ܐ`:t Yu`uwU!zCO#Ψ?Ԅ[RO3z~wn=w}na(Kvӽsq;mkQ"9^|'?eR Cݼxm34"\WŻnX/֋w [>`?8zU"BG߻%"bDzlALض=viw T P.IDUƞG\Re"Ƞ4db𿴔Qp!TK |DY9 &Jİ Zs1$]g!Fvl͔\Swtg9YS}jAhmQs?\/n)rܚkqzLG\OatXW'/NYgJ8_^ܽ|mXb 0.sPGt ~ŷB*ۜ$K3c!)X0(pgB@D)>,u\o}~ 5\[i&/т3]I8b >ڠZg~\Ŕ^hyXU\׿YXiKΓ[ _v}" ?,WX_w+5)}(O65oâ8`l~1]oO(쓃HkyJkX1;WV9vYgWҳ~ +WɠZw.%2U 3T~CAnNiMZMǮ[yLֆs-)QN%`IJ1>hy!ћv+?znmH;CUn Cb%:}4n)k'kڭ y"zLQzdUDwW?\R ;.!O,ƇtAS2k?ߘ:a=Re~S߄U^q㏿ ,_f7a h iL Gԡc¤Z걘XƓgO^˺q,"_JQF[Zz-xfF5/Rጭ=e,:>U+kpegMi !KGqڳB>n,Teu^j?֦3ubTB%{E*q=)xpRݛ)^H[&I>hAP]!B+KXenj06ɱ95z{ˆ:' nMPՎg8Eom!RHX/:9v/MN_ĕ:ܸ0n ZOН+ /a̐!`t g۸8V*f:)%58iE[Hb,AU wqqʏH.)*˓j[U{0,#0(PL3QDy#qH 2Ee>}NWg%?+x1JݧU`_G~4p5c rW/j,&>C/ eQ*0,`úɄJBaWBcݮX)s߀EJ aWpy]01|;s wMg'F'eI0ʂU)#(@CLȤ,R!Nb Na7FhazZhp>)QV"p=`M12%i<`38-QDV@7Rnla{v^P )Fڮj{<`UWVx%b/L 2hDuZhZ[A 2 sFq8iWoq G59QѪΟ5B2?7hS](2!_Cx_u*Ax^Y7qiO*hs-5EhIR8E(Ѳ%\þB7hE'$ 0]:'4;:g@M4¦4Nu:n `ñN3x # h-KwKa!/D۔[uEItGy"'ǔ2+.+]2S(#V~h%fbv\JV@H>pG`_wðr&)01}3JvթO$FL%G$M[fAeI3iUaNgyZ ?;sF)o=ML&]_‹oyƼ#*a1cw593~L<ޙJzyybjTvAG\P%uSvV;]|q(9uZR9iN*37pAQьTC&[ 5)J4܃`F$n4~ElZZvzyM5$6׀:9? .k%T}`Y5@B &d{lKeomNR(9U G)#V \fV W gemwulsDڜ;5c:7;khm.6zH'̌hSh_ZWKDAt#Wrjn/7ŲPO2lG?y}}t= UIy.13y!]UE \ un1n Wkߑ?]}ylp_>zG&i/?Mv oiZWXR5Og&ZLegoG1T6h%G6ޡxpx_Ll_xWޭg*\q\^rDIS)i餱0N~D zMuX7`J4%l꘾*RdԤ_717AV q~y@7:6MnɌO$M+oBt1Yҹfc<<3UNK}0Gދpm`A6S~{X~qgῙ29J8*/smPNB*Z_#rz%(tGuyUOfߒW+17G}b) !*Ȭa`֩-ƭ-wo*'hn (LI (rNtp'S`&s𧸄oV}ji6 Y|(W/ 5j _KV̍+]H`k9uNl־%/3CVAr/~ã /th' kW 9*5kR&5۫=4%,OʁUe450mC$r:(fFh^vBNʻ\}tjIBa维!3ЊQZrU怫 |ˆ/Z|kSR-zG͖tznǀ ;G bLiT;ͧ/}1Y|%ޢ}3.񮯾*VaWףǿ}|W?>q7eO7w%> ^ efMuyXON7)?W4.]ПyX=^<&V(2o(7:c]M8Fc@3W fQ[#h'_>_{Y_xY<:3b߿y}|{{]?/:v//NctH_@t4JOx mCk ~~*)~bYí`Ҧgm,1ZH!CmuWUXUtq(JTjup6^&clSJڼhZ%Iw̍[wJ{S\5ʕ桼uߪGW'ѓ:Z}P"QRA_?A⑓zG]*Ѝ_&')&dIln}zr޻vzO t@4&)SPҗR~i;: HyᕬB:*XcdYqͬՂVDH@ b9ocS*4'U(5tAORsi'uAY͹&C\Y@tPJbcNEԌi&:AL.I#׋UwrƳi2Ծ0`d0W2j$s CKtX<=zo*%ΐXp:yq@UrWhCsI8] Jiɹ-U.*i)Тhk*۹Կ{46e&wR Cx]1~xZ~yo7j\QrͨƿxG]ݼ}=^7T$STEWoVv@HvQuܻG.1Bey)NFD%He%2 FE%P'!%Pta̡tT*-x>X]tQr86@aQkPxŤ :Q:ß4E `_̯AhQy-5ÉSI5~Ĕ\_{>=eŌTc7d`\#cFƮ#@X[v<.8TKc.r aVսo;X g<ܣQvA[79nGn>8r;GLŹj%]NztĶg{@iMuxX~.#uVqX2*m)8Yhqi`b`px\D]4q.D+ cJ"T؋zL-cɋG,ՔtG}ʫqAQ4 1ၘk&n ;ז7A 7;Mͽ?-Horf~+poԗB+ojk#T{  A7;AeI3iU `yFfZო͍n>PYXNUE*'iHh]jjAj)JTUŬuw,8Kqi֖vX&AeJ+!+ @0(s EҶ{-t{hr2Λn4_:rTIV"DQ̀e-ͫ[nUBL(HiUeQRUΔVP2'r3.@3NHyht?%Ky֠PN+%NR˷ Ďb'5p*ono0|iUrUsfo>܈SĠl~h3BD 7WoG)SH5ßvz*I}xw&uBO{quSrޮ"?>VloooaV7\?l,8dscfM÷'woR׸CWn6s ǚ/@ 0m!ZI&}VF4yw@ BX}%i_=P eSw%j"Բkҍ]K7 b=@` 9CIJ6DaH5:>a/ -E,sbwrr(3_[@ͳկ:b%@1Ǫ-<mj7Dl nI zRsW`l%U0j'Ĩcf޽=.pBslS }(}EȞ jŦ+ a%"GlIK9 {3@AU'p)"x z򅂠?9 HT1K=M;#ŝC"e*W>Z;@ձ<^46nErh9eRz_X{!9З="YP:@r6՟;\$%*!"PZ1Ŕa{ y3MMv'\p;$)!>TmFȠ ~uΥ@= bD,DAfJ]`(TBM($Y׋0wd$&94Xq'eYZJE *ŔR(vuBH yni)E6 `!dSd.~`-s# Ra+Z`bK}G)^ls2PeCJ6bEi)P1a)XZ0 (+my #b@, v a%(u gqtX,([Ad@Dp;h,Pf?).*JӜf);{c(56RF ÉdvaNFNu9F2U! p]3~5ձ.hQG\氇YRBͱmؤcMXw{Dkn&20#- Ba`:t|仁7oQiHb=޻!&d}x3z/{e,ӑKL\w1#M/5绻 1jc:x VA1F= 60[eU{W@`<vLzpۭ/zprm҆^{8NF[\R6.m8p!P!;* F\: kd\6jcRi֨ Wߙ+e^O\2GΡp1?!eSFZJLDј CO~`;U  2Jupڈ+~zMsg_=F j>y[KT"ǭeRGU1bH!QvueT2bG8wWc*C +:rQe er{o|2Z?c3_;9VIqx2 9<`#^rKcn 3ђeURqTQ:ܸ#rNpJ*c~s(qǔ%BApw BĒQbOiezsFx~ xTsI \x6 T|-S1}%ZI{"'_5^"~1A+dU^/6q,F tu7dclzX~ *|)"1+ӫ#搦L@~SG((I|jgtDauY)oo\R[Vmۆ7vcA@z># 0 %ܠ(+4ŔsʫHBDB>'CъK&BВ+c@Pp CTADض4f.҆FJb"<799% F(”Pr0GоbCvqR@i INEɀ}MT/Z,`T/}(93V1ezL7rH,AX\IL(&sہwQopq!^& ,8 rRǽ佄lm%IHJ(+IoݒNʛ f8_o=.mx\?<Ӭme~Xݾy.S'sƵwKضjDm\߾Ϭ'3uˈmbajV)r8òQRNj;˸WYܹfm Tr=Aap*8Tlϸ2⠐ɔ m!ڛ}1MѦSͫv=FZ(|Q|{Jƃ}柂)F̐KYjpF@3 И@X{pwKƥe K/Y<9NuD! 
d6K%d֒9~mC ;Yu5:[祝&1sDDc!jȔ2CDFJcrBLwwz9.Ѕ.F0\1aL@s(24Br.AN0m~^-Kvok͟i==ʐ(K23-P0(eKD,%f֥ Pd\ q 0 l)\F3lǙxi("hm( CA{tv)/B&mCPvII:'ÛKȎr v1[v{x%LG(}mzU-  {F.% O Bct2Yg; ̛2j/Gzټo=b!HgCE%Jeۏhn8&r mZ[[4JxEjG R cFeg{Eo{Q9;ʥY_t V)0ȀG "{;ϴv).mXm.n?zzeZϚLKv0vWMOi˞d!/˯7ގKYkrƪԭY ȝP!A'>ӂm\b\c9R$ox7EOmFI s٨ڋa͟*!e᷻`{S߸_a لٱCp: PhμF 3WЙtft_+ôfܧ;1I gbFR D|kB 6H74Q\lUouG`\I Is({AӣCYuxO9JBL} [Dj!Jww FzY_ީ\v}%+nZ8X7kK$@Ϭ#ׅTR(* bc.&_\@"Q๕r8a}J֠m#wvVu mTZJs_/:kj i B /b k7+Aer"c\,S )(2,(ҥ) *a7E&S1i JS\F6pK渭YO]ޣp**E;0!;-h|n-~xQTwE/fѯԸv)F8%k}טEP2D !2c$M}/iaJ8_gi[2tT ,j!@֣TY1eϬcD%Vb8u9%n\b]?b XKt8#%qAap{WqmK$j0n|)5EG"i: jRRcUm21p$ٽ{vUPir.M-^@;fgs̥фQy $(wKW#a/D(t>isl `>/Ys R/E}  yAH\ GMzC5dl$(M-U a?%KoŘՊka Ereນy{u mFMȅQBT =q]u=Tukk}>Vq $>Ų£ܫ~cY;s.+z1?n>i}Cz 7F~] ϱ8_u(ץ R^%. Ѿ.-OC`C~]R`얔U{ץd` z..!PF iI=_q'9@̲&Sz>- |8}Nu}Oq}v3]''V %9(OPiS т3cN&l8r;79߱t$&S,R Be߮I@e0T"JGyib@IUvƋ¶#!U oņ|3.O@EQ O/;ӲV.>%ITnG:Q^I{%Ķ |\CYwd:s.`}_ |Xw^9^jv[,; b駧Ga 9mAϰ۪A$C*6'=G)6G`-Q V_K֬V`:6X+NWV` aO9,Fk9fZxIIȚ^XE U\&Hh;KQonf(!H4i\qsd֧iA$ J6\ax1 J6Y!0mGH,BQT)Z*jV\* n|^HDZ%OkGvRkEA/  "GaK3kl1a_5)7zbX_`wRD.,% C$r#^ؾa ' 0V_ \0l68(F&hN]$e9)7 5eexr#~l|NVaWߏt-YVx|t:T녳^=ڣx%e_>e_ b22@#r+4H Ja$sLS""5ʵ)o}nqe\ߠ^n;j y6{NYel'MLr)mN9$@-ٮ,`{vq1٥l@;N@v_[H$U'#[ B)-n=XNѧHe =(E%eL~"Fۢ䘞i9vӸ,-)k h9wy1IJSOƙX*ŧTQ,6Ii({% |B5`IFJ~%jUpLx[cLf װYuvZҩn#k{xr(Ʉ[i!*P(J4=]]ϯݠw'3J%]W bLI8['4Y"`*`Au츎rsO q/Rts ܤ\(Z0:^'<q1 NV\݂q9 ^cS(@/c4r^G5s/p衈&/yc̛֣R)žY rȣ)1^G_fN _ΩG^Ή_6c%53TXlw] XxR2B2Իc/p]Ш]FEM\P ȋY Qe/Ľ0K[R)dB^ʊ5R2NiFj3+_\O.8ȭ㻠~D"\yne\0Q^  3Dh@FR,Q)cdR-pJD< A;Rg*cƾ= _ &:0[X:pP R9ekR P!+}PJ0e^CAH1 2@  ׆C:~Yb:~8pyˀWTLOwȄI{sZ\R< ̍u岿 >!lW4S*H{V`$tbH TSP&X+m Ta˸l=Y}13ʫk'z΃ o&F^?@t{C1KoZa\uebOqqlVj Pj=s*z^Bp*e`) &$P@@)Vi HTc ˃Qޡ|ez3m˛Lk)3wgz>Sjx?mVdDsm]Q?>gVA(z#_8G?L{Ć߀qsb6o׏7S8pc\}cB3bz:u{p yo#>HF$7O7}MRwOI$5LVT7\8XnMм\*j8BS5sd;ڵ 2rt_twg5ݔdv393D9ܼHw@>$LK64p&MsVQ7k!wēB!P:oeS>An&LЩ,ټ۷٫]+7&\I jH ਜ਼i*=|~baA/ l/l:Ӽ-a1ʕ +0 GqԵ^yA[z!nGB#$neR:$Cl7JCÏ@v`> 8Ag7Itr./jM@ iRL04=SB$Euî~$y{) =Q&#=Q'FiC#eC/[s AK+@X .ӺI+ DizFXS"SWw6|̖b/we?° k\.u\*IZNh(oc:waRRŮBW? FBW{1= v#R >Ls;f]9M+˕s6>%a2or[֭І)*wuxE 5u7 B8\ܗUy?|^F`mdyt>VGt!%`z(\(-{15;\?:&4AMq.8L.HOׄD^!7]&pKxJ wC?snGe1 NAv,K wwIPwl b]URȀs@Xvz@ó_0`Ňee SCڹ?ztx`bﰋ:|;1|i]s6wjb~JrB!u3Zh ?J9>I/Z-I#,:F(cmCF.rO>o36xb3']mc7~ifeDoՃ%__ү_7Z'jhcV`DFH!XF Gw/Uy0-lA.$ $k[EǍ7:ڗReRD|,2$uxQ8$}AOA>Q t Ds˽,;†'r T+M3imX[ʅ-R4QL(b/ cYL[YPi;JWȈ|N}/DWdIyrz6M{wUMkE-›h6U4GAOht#Ptat+AH)ѭElݣ[ ݪh!Dw lSȹaVq&dשLȐ;̄t/%I1Y.Y:4זw&{_p_=_=ڣ ݯƂT&+mԘ4Kh X7pz*|n00YJ599ЩT3KTʩ`K;MNFwV۩OZYO]޵u$Be!תna y ZBhI)&%9e3 "ˢΩU_Vts7} ֔F]J[vF.[͏}E_㦾VUVs+Fۍw.leJO;I]5+3&,pZcJ4.94W^T'-Q9>Ik2Z&mQolU'ip(&;Q]_X8, ]{J*yG֨Z5 KCdɖr~FfnFӘdA;;o3*m)jxgk'ɟ^VuM'` 'EkA_P`ݡ/(PCi\9;t]SR Rbo$ZpUߞ{1lt$_G? y{_'?pG82~Yy4v׳I>]o>cƿ/4.S'}MYrq|ta/7w[#Kl'|?w%)77:ȭPj| XLۨ  Vo_f͔~/ɗ*X,^Ǎʥ#K-c )00›q`0j_XbU7 ]ac/!lwzDx08}~'%V;y={u]v,)QSLáSǁsODwBYsz n6]ՠN7Tf(NJ*`Azd_N-@pCH`ABkF/¾!"W=CtgO̫@Y i;z66YDGUIMPZǶwʔG=GWkl `6ɌJz%Zx2gl]PA #F)˼`` zkW+I NKħ9և%Ex*vqu=Owi`?pIx$6.NG?55u!/t|Y|r( 'Wl@K yL7Ns +98a/0hereeO,n_Sp$~V/x|&~yAɊF3]3i1Gdޝ=*i/%h%ksd2cÂDO,o|O|ə$dM,٫+W^ﯗ @Nq3H5hWDYzxNJUC) R7BfϘV{=$j#d{: 'NfxG/U܇{ ţa;}F[ѡֲ )]1b5nB1 _$IY" U]#$ Bd^AsX0i'2FM!kb`  u lU|lxǨck{ GWM zL O%뛹`CGT#GՇkf.ofD=-o:8/d^1FA_癭x>}=8YG V^c0FW Լ{_v#i{hxڠZ2Ύ]:;u0M! 
~{s}_NkR5;</4{̝lv]_vWl-^yӗ~*=T5jyаC.0 ٨+_c!481 `0a8FSgCdI]vrCj'/";2Omg*/)6aQ+L#>~MŬvk3]Jۑ=<Xt8'S;6j^)̶TR-*L+8J-5TF:Iyϑ9A";Xљӄ78z)yeN[-V1INq#4ȚnRYƄliZ5==G)MЂIL,yL308ƣ)ݽ!pfR#ol%5 LqMC 2BĽ|p[m4 Ija ?Ñ@@根qYXGA8@hKd9RԊovh#wb"7*مȣxbސIͭpeY:HMN4xɃJ;H@J$8 1zH$+Y{dHSک-/5i`O E %c<0 d&5k.c#sїtAZfVRI/G dΌۘb*FIv6FQD FYɎis=DŅDKџ_~kH霜u =JMH\.8 >]F,\%61dfMNFmQiB-yqed`!ˁgi!=2SU6Z4)dtf5LH=0 7SpiyKR[Z P$R3pZnI,pj`b%eU`jY6P>'RZ ARpLG 3d+ ,ԻFnZ*)E7AVBgr˕]cV:k;mzt[dQIV0?}nA!#T~SVžQh(np^4F.xT}F'e D 7,ukB:CSڠ+G!/ NrYiX;fP;-i? Ӡ1giOmӀZoRƺ`^[Lcf>$`ڙqƖ>9c-wIFXuB t7.v@yJ7}pnnCގ*JR8(LX:Wfh- XYT3`_-kQsFDZRƏUbci22oy n֓`Em+Xo3Vl!x̱*47J|'If/S  C/|oN~ p]ս{}20KDje>^T,}F鄈zYGD#LEx6lȽԼX4MFRQOT!4bd@VN ٿ+D5!LhzyjB$Jr&% !LZ A7쫧a2:`o$Q*$<ꜛ6fN|čzϺ U2uKA~u/Bj#nݢ֭ y*SG{m9~-1&mS.}[{=֭ y*S>]\]ڋ/B>!*!-<7ZӶYW Kr%x i5bPR/}cLP8S(,BemM\_I5Co5y2mYaro2m<-@kYWYITq􈨞W-[rU!.F٢I8JDϽ 0S~{@jd}Սd© hZ4tHgE*w-S.ŨnAGk.hx!=f, tҌ'îAU?EB)l4ۈU6H>vb5^E8S_RIˎdT.g|*<)S~I%u<d`(څպOI,FHWZ9x`.rҊ a _h-y n1vyQH >Prk2@E/vu`Rdw݉Xʱ+ śmY(!޾8[9 &DVgs4k?׳[|7{{eoϗa.]>,Ͽ?2-Oy&^=paEqDN*˿=AZ=.Uap_I,0Nk!Zy'ON GHˍr$\y+!.VX%-S+ "c&ߘ[˼k+Bb4<7\JUö*é²һog(\]9#pDi3źnV5+x޺МSP,+T/qL3LZ<4&HD)؍@k9Ԯo? eHK8 AIpHH8 NzLtSt=Hv/DWI% #NES2ߍ?B;Y&|#Մd@f.Mn5czC*:@Vh'& >eΪ4 R~}3GRo?Y=B$/ϒcB1Z Aן~tk0gɝEp01.^]B.wbu() :pNfZYjYfaF,y=̫\ JN1=)qby>9^C޹g( |K"$l"ƸIl/K1:rG.iVd`"/[CpS)эx^N7Rٱl^ĥp3'\2L&a ppEl!>_ H \nJ#u9օ9#) *Z³_3[:h9N]T^zeSR6#Ĩ}NŒڳz.\.1-J e)1UBv2 j2)q[P')P"0@stv3f2w- D2f( @ECCy?.wB\X< M/whx,QGPLŊ(7l dZy5lõ8[?"S($GAQ[[Zm?5ض?Ll'fz8,^!{b ],PҵK2 & ]UQ@nlyb1o[\QθQԼ*2ε 7TZPFVbT @89cEi,J$PJ2%9 {9uՅu!Iҕ͊M'w6@ʸb=|>H^7:Kln82'tUmJgmYB#Zx!$KiYZVҩ]0}FxG{$^wd,v&/]֫>r cI;ΞȻZA{ u;߱݀^2UGǒ4:[ꢻ[]n[ݾ4jcO%Vz{q>8Us"j8{-EG֪g.{,tuߝŻ5Hֱ.bc-}:i{JTWr]ƫۤ(Jdl΃fGVR8v?,gSP:0+\V XjLVc je6\mK =Pm{u]89_ty _͝Ɉfz_FGF*|~_}PHO^'䗓%LQ;ݢI<<'72j29;2ߍebn}p'`EGZ$\\1:{Ƴrlj:a7G|ι{x-yT# QCQ#-\9o!iʀU&^ j=Iհ̣/Yc+wt֨iHgj&DN>y5i7|am$ьf3-M&H·,9+2)(iCPQjMEm%ejۺrm ϥajXM9W $ Z0X,Qk"dϹRRK"flpXD&fsAH6WUDN^2 FI1folX1$PAt;\b*l&uM!_&Mm!,N2@})5P'=gVt->hFX͘h}QYϚAzO-Ti@&r,VBb8 s$a3:ŊV.Yab9 >d t[#^+12ZT\F(Q\ ʷ, uWޞ$# $Q*;c@&`g[(ʄfs$BS쵫ť%eQYq8ΦT&wb #ZR~X\1&z )WQmFYR,a%sj)'1i@XFݳ)Keb8')L}|yqQTx) JaIdɕNt:w(@r5 Gqm$cUK5B&*4-Ws1;Y\Rr'pE6|wكd[-b#Is9_PGt;5VhFg!G+>^AI{hO3Qx"_'qnl#ңo ILތ$+ ___ddFǷKۄl%𤑧=f V}zcO-5ͩUN1$dwQnv7l $+z83ҰuwnK.-A픳Co%]Js`l%| V $#p$b: \:_%)qUg14ljWMqlQ63ēc ޠ*;HɌUy2wu#scЌM|L-R܉(G$gwmq"XIlqZca=7~u҈[6K4׀ƐF?Q40v wO޻."* r*1NV\t%hРy'B}%xo?ms^F-~s{s6rܳk{n>x7 ZT~Z]RmvCK맥3 Z+kh+[plBKW[O­죎O`pjkiq#H"/|4DѕЗycj'āW~>#Ao@V3:u,&  mxPy" 5(2u.g];hun Q:E([}>N<zMؼ^}wpk|݁: .xo8|K a~ L,9+i.z!Nqqt{rz7L5ö%Y)'n_nUN΁vIN8㕒{;]4cCx\˒] , W$d8@x̜݈Q%R 6ɭqc˱6[;P_D/~]n~']JXJX3)kf^zt}wu^:1m'1<~˻3`\~:z{uie?.Vߴt^PaUf^lpY,?n$=-YZ&FQ?]\6 ϋJZ8 {X!GYav2IP; ;g?o= gQ M%ɡnk b޵Ƒ+b-Z"Ȣ};yJs35hȾȳV]7Ū<38οdJD6>q.FPyԞ8 *ẗ́C`;6^赧, }٘TgA3,TXe2$t-:RN]8f6F*#~i!xğL (n=w+lm[h&'\y KPȉf_SF]ޛ+˰*+[DW|^|r}g2F@adޔQ9hbWM&?>s, TkH q y_|32[/VS1{Vޙ1T|Y~ !|C$B> ("Yaaͫ",TO⳦ӿJLzx3%S`rFV?0; x>Y}$C; ˚]})S> ./2_)7@SjЅ ;u#5|')H`zּaYLy&΁u]v>3!M'KjJhoy60<Ɣ&̤#+de$25o<Uׅ)0/e?Ze9P*k"-,JtE5mF<+=G+UTՋRUKWTbb9̓\/7m X4vzbF')&H#s1rcmMMduuE+h%oS-_yH:d q{GaбϜD,CHD>2PaTGYEPz\IC A2nU. 
ASmZ8IQA+T1/̑DeKFZ 6eOa!ƔgJU*K#jYRF@Q:/H7cDll Kj?jR00 ށTRbD,*1skWƁP㌁?jonXvOsSs`^OӶOQݾ/`v7K`cTG DgΡP(SIIyi[%̊>1̑vOd&\*~[W}-qY0{cp >H{cb3#>Lݜl>,fcZLT2WJ CG0:kNT|pbd=x d2LŞ,^7M#UVDH#Ya@"OZd1u}0tRGD&̜0c E&XeM,Z,ڈbpM_UR@k8/!x>sEcڠ^+PBJò6itwt~j/ҩÁVYhJ Ȩsl!2Whc&Ul5ݒI@)x=:ɣrb*y!tdn,mH=”t*O`F8ySpW:;K 1`@ռaBj:JtGzb_ӥr)Q7pn䚖(.nh}r:gu枺󡹧ڎ5uQe%n;ROX'?xE`䒴;xEOF)wܵKmR88MeSJi&hģ tnl},2H`7'p7Qm,8׳FƉ77ݧ*ҝ,!UiOiטC^7c Q(Y#΂O΃e6bAkt8K-NܤF/XZHZ bکh 2[T3js|be<]ηNU_49@CiچH=."M v;#J;?sxgϡab<*6jU׬N1~K|3JK鱺,V5 ll/ȀFM3]\+zD[9=F_y6dA%Psr[.)+ޣ 2aߑ(9gcYrdRHBmT:`(zFɢ7R}LBDt&@pt:e'TrXOV J{TksJd_gda+}TpRTKQEXKTߦơbhXiJw#ot 6Yib^F`< ΁/I,21Y!!539QŃ2#Fr@\1%c9P34ւ$ +ARBPzcEx2QQ:$ &Fj@ra-+nT+ Hcu̓lvA;ScZ:w̲vMDO }2}068NBYAldL3GTU IݿD-)KPz}f"=5Hu&6AS\gd5C oؓHD]ɼbk_nc?Ϝ` hzQLTykv5[TV kH>m:O RKtu~7oѕD]M꜓;}}ks㎶3wGXEY3'TyC 孷NM*dL^_ǻUޥ*T*0/lf4Υ 3WeSUi'ZSOa #A# LԳQJ'Nԓ,`x{LƦ!7LzO28#0X~v̿H'DH|!"1iz=MT xXa("b筱.i` 1/;Bl2}dX loGI1ǻQ^wGL1 Z|O{>*K亿>Le#"++wDG`1oi18C}:>D͒ᚧ[O<'fg.jXuyU ɘVIjge^ٱh T;/pv`ܔmzb++`6KySLsJI 19QCلW2u`H7<\38+Rh`@Wrid1RD!Z|T,-Iae''0*Vݻ J+@(0&(:#Zz)$A˔W XL^ī×7})HXFw%}Jy;U)1RT̉9^wGʫ좴)<#! $=rU8ҌIejB֔(iV#=2F]+k2o^k#W Ī38JcM+ B5G>J;_ZX|-H7Nk |nB>Yj0>n|v1Qf3=+ m֑m0̕& t'EHI%]v4wzi&Vm|-xTI?,Q% ]oVޮJHbs?h@nc<9; uܵ^#Yz9dOxED_K4stK0jc]CE͝r SNv̀ۈ1P@\B=N=O:hXV7t/I܀<~8qG2Tc0/mS9f3DUK&gxmi4㒼=mb*y\х NKBe>΄/ա˯V';,pxmj}1 AN&el2T:H}f(y fpCf:Ֆ!hM%d09)rPq 1IpdXٯGLu2;&rƠ8b.\^޲kFW˸dž&򔛂$2OB wɞ  kF\aꢩ+3z&xg뙶G-pا&'utzdwi0Bs*/'.AǥЯz=󷂡L3vP"6ڴL':VqB}Y6W;ұH& udڪɣ#sRqiPR-a#(m֮KUpw/3]:(lim節D_h7J-V+U)A+,ArEUVcU0›+I*ٶTVF_m#%+!["jϔVeW!j2*bӆX" &}5V~ ud ,_~![7.|.Q3ɐQlO 3 s'֐-4G'ʝ5:v=SMSt`?gjA\.3q wr){^l7MmؕF9$x%dr2^#B'!pv 9!lq;9P;=Wk]6~* qZ C\>Zcj-?D'pfUk%6nh1g}ybݕkd>8bjSL-p9T DӒdQ갛L+xt!WWv)9ªwdxؽ8-}@n!~\XnH Cv?Υ  d{ʎcrSԣV)>bhB2|UoOtM95e&^$n bJBEALvG][&#t 㑆ao5gD {8qP1l޷7Q ]L0 5Ty'Gfe&V4Iɲ.G"'Lp`ԨJO܇#cx.Q0ݝx.뒌֭%eD0^J7 *J*Q(֕@]]f J],Y M%nd.A`o+(i/xQ6HfʒR˲>(31Ikk! VJŔּ:D<b7븧5JF؞J3)t-uXx:l&6(~{07vaܭa dRgֵLqP­a BÑ)Qd'{(R婣ڞN2HHӤܽ*ݤndt%ɋ <%JtE!ze'+aϓ%ϸaStotsk`baڇ> T3#!k<[sBt Trl𪇤_&rTC,1DIzH>Dm탇>2S&q9jG486 rXJ_qJw)0(<6w*%˪"rrd;"7Wl 9z /T >n7dUFa$i2$qIފ痳>-toA$YUnl.= O4!\~6-fz;}صb#@J*4D ~=A?l&d DN8E*Ie2tދ/~i~aJ#/Q5u(CDZr$\h nx>OJZ<e>.ʀM"j+7o2=('6 bd>. }p9FcE3"߫^:#/7FNVSTyad. ~r R{J63\eT';B2+_EkZ>JV.u{S~Y_Jq\ !k#Pf+ ዄ 3ZA5UUtIpV6BkYj{ A\-Hw4-}bA'dY6 W3*f nc'ϼbB"VFP~{EZqE8?/;5{qqD5BIJOF)50+58*HNIc <2nq;O|nq`݋%HXNzS=L TCw՟nś%gQ3EN'c*Ӎ>wZjTD@ =9#]qŦ''V1MoISZTrYurʔI !q:HNNl&I:۷ 4;M:JQ9R*_#b/)E/nn77oxÞ9ZOv\ۛyјêi Lښ5kk]>Z,fI-(8qf\Ħ(%5(pN8E[Wi '>/5^ӈ*ElKLJ+AѸY͘Vw^4zruS|>+cG/]FAJ+*MDT[ݵEeonĢJ ,YͥuSx)V Vj!@;N]/V1W:G!YBPNjVfoW>թs9qBJ5 W~5+Y%Piﺱv>T2 '3"s#:\VkQݪY(zcYey.Y G?uug¬k;rӮf{ϋ姫1s߮kyߵf\6WѬLe#eJ?Z}/?tUWjk;3?\g2׮lEwJ ďPoÓSNABUekB(LR4jHť* mHu@p[angG#~B$== Lmgfp8QQJ2>'F~꜐G"m2um{vpGq1Ns?kA,%]3;ѹIoiɭb"Gsu!KHb!; 'S+G3oqH!^*0)U4vPfػû=GǓo:z89@znΙٴg^/j+yͷX}19L7`݌zZS9 Uf~)xcb ?_r8uk'$_30{.&~3}0)]kGcaP{zq?P%ۧ*'{='=\g Qe ~tiRna꧎;{>8&5]2]4xgE$~Vr!%bND2zINrO HsB˗O$GՉqD Wn 8nzQU+Tjs' ̣f+$FVWEUQeUNLE _>r)截s5z"v]6J@O{TMA3E=0LMG;75oŰݩ|*Nܿ亶~:K1~@_Kۓ( ١C V;ٞ8D^$f)7\ܚ8l-HTD}6i7Fݷ y1 j-ڪl[أԝZ=ϝ 8Zfyj8iu83fB0+3mU&ĺnüt)YshD+HiP)]a$uHIK50;qΟn>w|G_4N~E?, ˞wu(5#D;( u iZ\ D #7$r DčgfZ<pzZA@şϊxO%Q&u /xHJ@|>M> \OrK Ƨ?}2kkh^wK8oJ5 淂 ¥jX8HچUHj^*EL З:5, ϮwS] 7'$4Pe+ Z ȀR0$ [ԒR)@#8~R'jjDBU`!թ9 GN8ݻ"OӍG w>fIn k(*J؜E\q71%Ihy( \Rg3YCp&_8o KiL|b8Kٻ&7n$W23K G&j_dyccc5yY@$$[f}"](b7 E}@@"T8m)^ . aM℞쓙?p1'7zݼSEKw1RGpR*+Gqƺ egj23$CECk cư䄧d o}VxqZqݍc?;Dfi9vϞ8TmIB\0'* faZZ?or8v:Ǘ&1q8OSmR$-rQCTƀa.48{GqD*MOhβFcή8CYFHN(Xk+#LqˬQKH3o-lx"|+G 1vŤx@t/`Ih@i5<˹b$5brXes4W k Y ̿! 
@u~h=Im(7&OR I>u(s/-Ksx1;e"#9̹\LhG>+TsQ[D Ոc\a߿+Az9#RI ֡39Yi,j q(*,S˄ &+V$쭨IJr]VTNzx_EW?UQać*WcK-Ve/shX}%F(qf9rUOTr [}}܋l]g)aRכ'NO?đPR;qeTC _NgZg/Ǘrڞ@NzxOY˚~(TƵux7Y<9qG}^RN>ՙ׫J>xN{||s_9OK-Ԉ)4~<0P= snv04 7Z#F[E#dmx*JFVѼUua`)tbT < ֫Rz ^@~MΎS@r{^Kg%\hBQn%"*cYJZ6x𣟗\wkh)ZkN+:T{[-& tAԩ*tLT۶t0Q51C^5Ź-dIϟP e}GvJ/"83V?rtkb)zJ+n|z{3sI 엉#nsk}9 P Ŀ>̗[5c6y` N-N׏yg;aMll9 vR-3m,FMY,V 9 m°N6SԢd]?paREIɤ9kb\= 2+a+RAVhRƫյj L3,~'-.σ1lhzw/R gƈX^]M̍N|Yw7vZ}WFw.8~׃!OCVoe)b Hil gz2M+5ȣTm)\ fy(*vAӖ aV't:[O e ݱ_?=$/m_eǬ׾ A}uz}{I;hKbuGJɤ '7\Ε`BRTCI.xϨ-/fUD/6v_gmQ1OĨ-1z[M'/r<&IJOw\k)(xE=KP RqA}'VR,EI7C"n2dt]\0RJ&P#2F$B8B >,̬.ۉ7vzk zg\l&cjWw:z1GsIyj0'eT̐\X2]Yɲ jgS1VsR #%,4hjWV2INЂo3dA$d4TSesag2>`A39-:Jx~\pmWX˧B6vh__h ~{XS]& h}\߅gCBe- ~۫A>K~/ NJko‚ WͿ]|=Mlb° C$ .K~ mŁj/Kt+"HA~~1sFjTP-3+D)$j܈0K9dxLZ ʹ=+Bñ[֬*DC+>7D)ԝDkȎ4i996Z(h>n ~(}.zIKX ^K\bs' ^`˱k;Ԕb[o&4iͲMjФt~sV;~$W[<9HJ*&cZOZV_~>ݟnY -vGp 2N98S8rN :} G&nVI)BN*զ*@8mŃWQg)VmF˔V6lMA&6*$Q#A>׃fH*-U{tu'蹝*I)6y2yRFqAeO+,TY-B$1N=zCVbGf㛫/ǣDڇ-nߑjf_NEVJf1ҟTVfuL] ؎P0>|sIl ?Z Dp𩷬N+\:nOλ<ȬMӖD@[95OMb2)p˻̏n%AVm e9.$6Pʵ;Ju $ج](IO2] Խ$<-BT- ͋$e@\3V.TCT.E<#Ŗl>N;bC!JY˙.~v!Q N%] \"}=.e&bd)dhq:4Ç`5*0VJ\3F0BgVlNTdfV&.7s&W@(n5` +3IyFSP$yVRJ*Zwg *꥝ٮx1BcjBVvK96?I^~hȈB4blW V [*T ɮfJF5j9H&"p)^~ᵙw+q>VŒΆORgؑ=1=[aǏAHMe  uc\YJ 1 n@ㆀZ 7L)cedg,bzKu\PۏMP[CDa63HrEQLc?kRρtt}jmՂc>PK|\U"HK,`=A.r2R 󋮮Ɖ\>:4q tQOWq@.NP7Rg?gS^&A?! (8h@0 *f*,ADԈ,iG샫$+S>,+Fp5I71Λ"!?݁N>b#B HJ #}ۭyn"eG;yUʨ8.˔S`ĹT+5R-ej#"^ P#'sf#A2-A꫰ ꖄݡ|%4*Dfdc&gRr'2@D@If)R8R >Ul yϧ> Es׃?i7<|uW"i"+ώ&ETqg/} +Fw?6 ǩQOG?5/ G-?\ĩ X/&ՏC{? ؉0g>ه>ٲ7e ˨tBq 0TrL+G2Z͕680 3Ѽ/(xMx=x7J([K8׃~6]4֍DӼx`K7fO|DTCf0{%Ա~T#9wZ% gcKK塛Ye~G!N$_~'I;`i'^(&dޕ5#鿢 %܇#LlLў'o8xU JZIUmHDIRTݮD&82 LmK`|8B,J1+B>IRJ&-(d8&0_=J A]=LY}irgS/y ÄI j+pOr!- ؚ|QAGx 26-{`pmwG]ng?|d ؙ?fie*4ڌ/4!lK_ O*,~4Ax*^Oe7jhC6G!=LxRVOe M/EuxȰϣZHZb): yOK= mӌlt{0Q={uy/sSDOӂx>CEOY?ZR2.v9{bљÝ˭Ωs8܂6APlj1`*hmUvjUݪ*WҾB)^Z>Q[7bh+f9S(֎7r]( L޽B{㋫*++l 2VP쬜F,@-@!vw{@ ~u(_oWEBPH`uwe~["@ruG6z9)d-@BJR;~^.5DXV掃+kS#NB_G|. jKFRbQ[J"Q1Bޫ9ˡ^[w,ί#E^:{8z!yuviY_!7pOCÓZdKŲ5fvH51(Ug-13y3xٓn ݫe܉!oN3uWn:;X+`4f-qpcu(+Z7WPUwiSJn7-qqYr 0\}͏cN'*4yCwstj3QppsS jzlέ䇫wuZBJCYVHn2k9p&h0Օ.c[%թ8 Z&jPؖ=@Bti}өV//ڢ죝*ªqcNUЂX oܹl׈!vF5s RݔWAh.k̘nH) zGh˦W`P)Z&/gTWktbh)GEz^:֦ )wA(}8j*0Wo?t)u(ۻq&_٭a_5?+/T]~bI6]&sB˥Fg}\A^ +y)~׌tqԁU;*E\ͪ bE8l#]<0SԉS~>n 0wTt՗h.9˯mqp\к ъ8>WPUwi.n^_Һ`,oC2 3N<=y±8>iy9`M03Kfz82uOK5W G-14$IKɣBYyS3NûeuKs՛>/g˻X.gC;9& 7q8-YoEF'/7k)*}^ӟ7A4M~ܹ(X,S Sֽt}j]#RfKMPn8 3 \D*D>y%ja@^ ;T Sap{nMt&-BZ4dXso>޴Xns}fA~f素PA1X$xu<{s͞':?^svޕwF1˄M75`G*Ė_",pf#y>O4^\EP:Ԝq|;֧NJ kljZEȉuh4 l> |Zʔ:_c2ʣ ;*9~^(ߖJcq&hTr'9(O'N|T'83|0GDe9w0(Gvl#U^B1'VL9^CA|ZyuwUjȃ .{(K&$Yf{͛@n^kF7{xVw^56^،}+3 ZaXr5#Fk| titd1xvݫFXv4}G$"v%I\ȺrDzK `Enքs/[ZGX:^_"Iԃu1ҹcO@Jf_h#5yUYn6HYynRZJQ ZݑI1DcukqP&ҷV$@G!!Δ=H-WA4I3uZ@ÄߠuG5"FQmhYeܚ0 XMeْXmf,D\Jc 0K$@%Y*#@cPqJ(H B%r,Af*p: IEZ1b%XoX& DŜ{n8jKƅRiJIQ@xq eYpkgHxw㶯} Ks5YK3($;$K-=9 8].'=)$ppW۟䦑UBoy]YE".p}>`Tջ#i,^=HV.0Lw{T+H\?7&1 "m|kjW2LRyrLkJC0+&7^@q@ P9{N6{lY34O}t ԡQsޮtK5d n*CqDy; |ߥ憊(0 zustI#<TB[!)]gCn[Bd@0@:.6;RcD#@UiʤR0((S #DǷWS ]?/2DoQe֑HC&'B)©P)O48&O$ Nj+vbe9,6 Cl zLvnYj^lcMj*z +k eϓ-g wIޞy~BeWc꛴TVg2'ܒ_CI\IF*$F9Ob`ϨSPcnXP 1F,wNV鍷\xk" "|k14$, =H(%ubp% %eDE޽r7?LQ3W.<%?SPZbdKNɘ k(!LJq0AK=0MI]Lgs܂ȡTF:Rx *. 
\XEtϧTc-t=0k-s5|wX34ҿqˇ:n 9\3k_.whv *jc/oy\h&' @ ƮBC9󿯕ϓ»'6~&QS»WKrpnX^w~t\#ˆokW(HAXU<6!`1BJ$Ј0b SLBW8>GH-]Vs'\P|.ՏEU8}RJ=\T{^;res\܄vt$^W&4>ШrjF^lVس*+Uhd8d&ETaglϔS8_%u@vʵU;F\%Kj]L3^!$ xvyF%8R PH2HBFabAiyAQc9)﮸W^9(-Pvf> 75ucw(B^VA 9$Al vyKEo?W$knO٥XޠPO-tv$sB7/>u1D%.|;=\~?I:M ~I& P*R|4Y(H!&:ZICA@rFyDX0FjX*'V=`]'!:8f)J]ǘq!b1%5v6$ H1 nC2=vQl8~}2/œbA@T(O &9Vh/S&3Ӹh#/x33=^0Ŝr,i.O.=K@^|C֪a7)uR8_+_O؃jN'x-ԉtn> (;cߤwr$9ggN=v)g_kYZ)#R}F={~drF:PWH;J%^RʍͺnNN=qC16n(ONR%U^0w0Lt =NF-08 P=SdsĴ^l7³l"1rGD"izԈrD=NĴR9%3ܜ9{u|JUHXO]E)w:γ1Jf\wTZ7LmBK^98k[eɒHBç5bК(JPJ]-4 ." YXFQP" V͂S=(1OĨ"2iJH D"GԊEh QHb",4V;a*MjSZ55FZ׀Çr?+ʝ't Lɘ/*HA0}&bݕJ&US*6>]EA\(^3Kٯ,L2jv70aONI]ZkW1SWJͬ,th7Хϧu9 &U:g;L|nUh稚=2,w4Ŝz ş5&z'1Vl`=]JbN kB:=cij^@][t[̕3oB6N\^a@=#<iOYK 0!,F0D::X' 7vP2 a1b,@hFpg2(TLh3# :zbHDŽ2%nQF3¨U]#'%SӞ-@e-"J¤<\Zs`81 ~  n܂ 롄|+ܣc ~^-)w怈7?vSO6Ӈ;|ꕡ$\Р.PY*;ׯ/K @ Xa 8zz#+̃0O I%wqwYE`}nId{NvMVϓ]x?K=yEi-"l;kuQ&DrZiVO6S Ӗo{:`1@A\_hjgݝ]/xNɞf`_3RCwb2U֗YN\Pt+RZPuxZZr&hcF#z4tZʗ>gT$. ed^ܶkUB5Ԕ"WUWS #QKK^y_Jyt=JXZڙoJOy@SO"1o=Ho5X< ru#+= 6[}X2fiu^t+cRG v?k%;v=c,_Yhͱx>7 搻 Z[9?_]ɤg|2+3OYMELTчʵrwT|Gݭ;K9}{[H@ݭ "ZA8v78Vѩ2[T{[ ݭ "zL1V%/}N8q45|0[̟ӟhJB̓sN»ͫ=ywqsrҷVJTୠUCгf˵ Y[_^±[n.gP)!Gq4%x d ҂vWg Β̳hpR_T*1HlPij . Q &ho1ckkrFej{R# U!̞ڪ+9<\ ZkF(9uHF#bP@##iuZU_mnj5Al9;mLnA $n:S'^bo{ql%3P@=PR0$^LHNEIPNEIpYgJ@pQ" s 12*bOmadB c(!aJ-Zy\H"  Q i`"pS(#a|=w s ƺxXN%DžMB<['aL 3#5B|*@Z7u"s~b_~VYbfGMx *wӝ_(F쁥=EOIA%(u4zucLT6d!"r$P G<%C@8urD( 9d @Iw2Q+yI=o*Dt%;^\D^{A t%jK) ?g?}#Y?Qߕǒ/dg{I\P$&ե0A2S6! !u}x(9u{/Xݶc ݦV?W*lZ 4kn[̓!JJ>q+p [#ɑl8ϞmRkԻP ;^9i'nm0o j|6`̊tMs:{3f"hh"[˵RxG}{k=ȝTw;Һ{FyۻKlsrǫ]k~kaBTK@{,QRB9lgjB!s ~#Պ4(ˡ=ś9uQ s)Dm@b#c;ߪQ Z&yaIOJ):ۨ;mTQQQ {9"\Kx P&mI|F\]j,+mV2A %3C?tkM*xhIe*oy=E*YCuC2yUijӬs O:,M4ۧH̻ RuBuc:(ݺ}!'jx*wBDl 8{"`|:" |G)>! \eg`!gnm So2>(}C(X HD@  8쮶kҎd11V(NrPI*X4@@1UA- ]دf~l2}t4A='㙺KUSQQ8DP{eeWn!Xtxj/5n}dPxS YF~d֫5M? >}QϺ4M҃X "'NOdOV-f2[. D 5u"p]}FZdŽ*?/Ӝ JDW(Q}FQE{iwn~*/0\%|z웶QKn9D9(5h]󣃨RƹXBd>5 >ց>)Q`}$<1aZGW veػ [RuAUA,bAT7wm Tʜ)/Jj?>T)dG7dL/ 5dj}Q}pg^_(Njrj ;쟕"!/."V)DƱHB$" Œ$Thh01XF2Iz}QN|LMr}/&W'}غ\%ҳ]r7Q$fGn@;.ݮHk(V2w,g+P4JXW |'#VD w6aYȊ\y k{`sX+ ]R !+YI&bHp˞Dk`m989P:fn;B|Î"@@0P P

Jan 22 05:18:32 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 22 05:18:33 crc restorecon[4574]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Jan 22 05:18:33 crc restorecon[4574]:
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 
crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 
05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 22 05:18:33 crc 
restorecon[4574]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 
05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to
system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 
05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 22 05:18:33 crc restorecon[4574]: 
the following paths were not reset as customized by admin (every entry in this run shares the Jan 22 05:18:33 timestamp and the restorecon[4574] message form "<path> not reset as customized by admin to <SELinux context>"; entries are regrouped by pod below, with contexts noted per group or per path):

Pod 5225d0e4-402f-4861-b410-819f433b1803 (context system_u:object_r:container_file_t:s0:c7,c13 for every path):
  volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json
  volumes/kubernetes.io~empty-dir/catalog-content/catalog/<name> and <name>/catalog.json for each of:
    observability-operator, oci-ccm-operator, ocm-operator, odoo-operator, opendatahub-operator, openebs,
    openshift-nfd-operator, openshift-node-upgrade-mutex-operator, openshift-qiskit-operator,
    opentelemetry-operator, patch-operator, patterns-operator, pcc-operator, pelorus-operator,
    percona-xtradb-cluster-operator, portworx-essentials, postgresql, proactive-node-scaling-operator,
    project-quay, prometheus, prometheus-exporter-operator, prometurbo, pubsubplus-eventbroker-operator,
    pulp-operator, rabbitmq-cluster-operator, rabbitmq-messaging-topology-operator, redis-operator,
    reportportal-operator, resource-locker-operator, rhoas-operator, ripsaw, sailoperator,
    sap-commerce-operator, sap-data-intelligence-observer-operator, sap-hana-express-operator,
    seldon-operator, self-node-remediation, service-binding-operator, shipwright-operator,
    sigstore-helm-operator, silicom-sts-operator, skupper-operator, snapscheduler, snyk-operator, socmmd,
    sonar-operator, sosivio, sonataflow-operator, sosreport-operator, spark-helm-operator,
    special-resource-operator, stolostron, stolostron-engine, strimzi-kafka-operator, syndesis, t8c, tagger,
    tempo-operator, tf-controller, tidb-operator, trident-operator, trustify-operator,
    ucs-ci-solutions-operator, universal-crossplane, varnish-operator, vault-config-operator,
    verticadb-operator, volume-expander-operator, wandb-operator, windup-operator, yaks
  volumes/kubernetes.io~empty-dir/utilities and utilities/copy-content
  etc-hosts
  containers/extract-utilities/c0fe7256, c30319e4, e6b1dd45
  containers/extract-content/2bb643f0, 920de426, 70fa1e87
  containers/registry-server/a1c12a2f, 9442e6c7, 5b45ec72

Pod 1d611f23-29be-4491-8495-bee1670e935f (context system_u:object_r:container_file_t:s0:c7,c13 for every path):
  volumes/kubernetes.io~empty-dir/catalog-content and catalog-content/catalog
  catalog-content/catalog/<name> and <name>/catalog.json for each of:
    abot-operator-rhmp, aerospike-kubernetes-operator-rhmp, aikit-operator-rhmp, anzo-operator-rhmp,
    anzograph-operator-rhmp, anzounstructured-operator-rhmp, cloudbees-ci-rhmp, cockroachdb-certified-rhmp,
    crunchy-postgres-operator-rhmp, datadog-operator-certified-rhmp, dynatrace-operator-rhmp,
    entando-k8s-operator-rhmp, flux, instana-agent-operator-rhmp, iomesh-operator-rhmp,
    joget-dx-operator-rhmp, joget-dx8-operator-rhmp, k10-kasten-operator-paygo-rhmp,
    k10-kasten-operator-rhmp, k10-kasten-operator-term-rhmp, kubemq-operator-marketplace-rhmp,
    kubeturbo-certified-rhmp, linstor-operator-rhmp, marketplace-games-operator-rhmp,
    model-builder-for-vision-certified-rhmp, neuvector-certified-operator-rhmp, ovms-operator-rhmp,
    pachyderm-operator-rhmp, redis-enterprise-operator-cert-rhmp, seldon-deploy-operator-rhmp,
    starburst-enterprise-helm-operator-paygo-rhmp, starburst-enterprise-helm-operator-rhmp,
    t8c-certified-rhmp, timemachine-operator-rhmp, vfunction-server-operator-rhmp, xcrypt-operator-rhmp,
    yugabyte-platform-operator-bundle-rhmp, zabbix-operator-certified-rhmp
  catalog-content/cache, cache/pogreb.v1, pogreb.v1/db and db/00000-1.psg, db/00000-1.psg.pmt, db/db.pmt,
    db/index.pmt, db/main.pix, db/overflow.pix, pogreb.v1/digest
  volumes/kubernetes.io~empty-dir/utilities and utilities/copy-content
  etc-hosts
  containers/extract-utilities/3c9f3a59, 1091c11b, 9a6821c6
  containers/extract-content/ec0c35e2, 517f37e7, 6214fe78
  containers/registry-server/ba189c8b, 351e4f31, c0f219ff

Pod 3dcd261975c3d6b9a6ad6367fd4facd3 (contexts vary per path; all are system_u:object_r:container_file_t):
  etc-hosts (s0:c247,c522)
  containers/wait-for-host-port/8069f607 (s0:c378,c723), 559c3d82 (s0:c133,c223), 605ad488 (s0:c247,c522)
  containers/kube-scheduler/148df488 (s0:c378,c723), 3bf6dcb4 (s0:c133,c223), 022a2feb (s0:c247,c522)
  containers/kube-scheduler-cert-syncer/938c3924 (s0:c378,c723), 729fe23e (s0:c133,c223), 1fd5cbd4 (s0:c247,c522)
  containers/kube-scheduler-recovery-controller/a96697e1 (s0:c378,c723), e155ddca (s0:c133,c223), 10dd0e0f (s0:c247,c522)

Pod 49c341d1-5089-4bc2-86a0-a5e165cfcc6b (context system_u:object_r:container_file_t:s0:c682,c947 for every path):
  volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle and, under it,
    ..2025_02_24_06_09_35.3018472960, ..2025_02_24_06_09_35.3018472960/ca-bundle.crt, ..data, ca-bundle.crt
  volumes/kubernetes.io~configmap/audit-policies and, under it,
    ..2025_02_24_06_09_35.4262376737, ..2025_02_24_06_09_35.4262376737/audit.yaml, ..data, audit.yaml
  volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig and, under it,
    ..2025_02_24_06_09_35.2630275752, ..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig, ..data,
    v4-0-config-system-cliconfig
  volumes/kubernetes.io~configmap/v4-0-config-system-service-ca and, under it,
    ..2025_02_24_06_09_35.2376963788, ..2025_02_24_06_09_35.2376963788/service-ca.crt, ..data, service-ca.crt
Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to
system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 05:18:33 crc restorecon[4574]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 22 05:18:33 crc restorecon[4574]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Jan 22 05:18:34 crc kubenswrapper[4814]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 22 05:18:34 crc kubenswrapper[4814]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Jan 22 05:18:34 crc kubenswrapper[4814]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 22 05:18:34 crc kubenswrapper[4814]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Jan 22 05:18:34 crc kubenswrapper[4814]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 22 05:18:34 crc kubenswrapper[4814]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.138178 4814 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144587 4814 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144619 4814 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144655 4814 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144664 4814 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144672 4814 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144684 4814 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144697 4814 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144707 4814 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144715 4814 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144724 4814 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144734 4814 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144743 4814 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144753 4814 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144762 4814 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144770 4814 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144778 4814 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144786 4814 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144794 4814 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144802 4814 feature_gate.go:330] unrecognized feature gate: Example
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144814 4814 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144823 4814 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144833 4814 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144841 4814 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144850 4814 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144858 4814 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144866 4814 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144874 4814 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144882 4814 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144889 4814 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144897 4814 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144905 4814 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144913 4814 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144921 4814 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144928 4814 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144936 4814 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144943 4814 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144951 4814 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144959 4814 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144966 4814 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144975 4814 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144983 4814 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144990 4814 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.144999 4814 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145006 4814 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145014 4814 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145021 4814 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145029 4814 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145038 4814 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145047 4814 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145055 4814 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145062 4814 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145070 4814 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145079 4814 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145087 4814 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145094 4814 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145104 4814 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145115 4814 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145123 4814 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145131 4814 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145139 4814 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145147 4814 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145155 4814 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145163 4814 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145170 4814 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145181 4814 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145191 4814 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145202 4814 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145211 4814 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145219 4814 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145227 4814 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.145236 4814 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145396 4814 flags.go:64] FLAG: --address="0.0.0.0"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145413 4814 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145428 4814 flags.go:64] FLAG: --anonymous-auth="true"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145441 4814 flags.go:64] FLAG: --application-metrics-count-limit="100"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145454 4814 flags.go:64] FLAG: --authentication-token-webhook="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145464 4814 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145475 4814 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145488 4814 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145499 4814 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145508 4814 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145518 4814 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145527 4814 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145536 4814 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145546 4814 flags.go:64] FLAG: --cgroup-root=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145555 4814 flags.go:64] FLAG: --cgroups-per-qos="true"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145564 4814 flags.go:64] FLAG: --client-ca-file=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145573 4814 flags.go:64] FLAG: --cloud-config=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145581 4814 flags.go:64] FLAG: --cloud-provider=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145590 4814 flags.go:64] FLAG: --cluster-dns="[]"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145602 4814 flags.go:64] FLAG: --cluster-domain=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145610 4814 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145619 4814 flags.go:64] FLAG: --config-dir=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145654 4814 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145667 4814 flags.go:64] FLAG: --container-log-max-files="5"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145688 4814 flags.go:64] FLAG: --container-log-max-size="10Mi"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145697 4814 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145706 4814 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145715 4814 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145726 4814 flags.go:64] FLAG: --contention-profiling="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145764 4814 flags.go:64] FLAG: --cpu-cfs-quota="true"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145774 4814 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145784 4814 flags.go:64] FLAG: --cpu-manager-policy="none"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145792 4814 flags.go:64] FLAG: --cpu-manager-policy-options=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145804 4814 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145814 4814 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145823 4814 flags.go:64] FLAG: --enable-debugging-handlers="true"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145832 4814 flags.go:64] FLAG: --enable-load-reader="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145841 4814 flags.go:64] FLAG: --enable-server="true"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145849 4814 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145861 4814 flags.go:64] FLAG: --event-burst="100"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145870 4814 flags.go:64] FLAG: --event-qps="50"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145879 4814 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145888 4814 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145897 4814 flags.go:64] FLAG: --eviction-hard=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145909 4814 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145918 4814 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145927 4814 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145936 4814 flags.go:64] FLAG: --eviction-soft=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145945 4814 flags.go:64] FLAG: --eviction-soft-grace-period=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145954 4814 flags.go:64] FLAG: --exit-on-lock-contention="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145963 4814 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145974 4814 flags.go:64] FLAG: --experimental-mounter-path=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145982 4814 flags.go:64] FLAG: --fail-cgroupv1="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.145991 4814 flags.go:64] FLAG: --fail-swap-on="true"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146000 4814 flags.go:64] FLAG: --feature-gates=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146011 4814 flags.go:64] FLAG: --file-check-frequency="20s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146021 4814 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146030 4814 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146039 4814 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146048 4814 flags.go:64] FLAG: --healthz-port="10248"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146058 4814 flags.go:64] FLAG: --help="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146067 4814 flags.go:64] FLAG: --hostname-override=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146076 4814 flags.go:64] FLAG: --housekeeping-interval="10s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146088 4814 flags.go:64] FLAG: --http-check-frequency="20s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146097 4814 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146106 4814 flags.go:64] FLAG: --image-credential-provider-config=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146114 4814 flags.go:64] FLAG: --image-gc-high-threshold="85"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146123 4814 flags.go:64] FLAG: --image-gc-low-threshold="80"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146132 4814 flags.go:64] FLAG: --image-service-endpoint=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146141 4814 flags.go:64] FLAG: --kernel-memcg-notification="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146150 4814 flags.go:64] FLAG: --kube-api-burst="100"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146159 4814 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146168 4814 flags.go:64] FLAG: --kube-api-qps="50"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146177 4814 flags.go:64] FLAG: --kube-reserved=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146186 4814 flags.go:64] FLAG: --kube-reserved-cgroup=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146195 4814 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146204 4814 flags.go:64] FLAG: --kubelet-cgroups=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146213 4814 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146222 4814 flags.go:64] FLAG: --lock-file=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146232 4814 flags.go:64] FLAG: --log-cadvisor-usage="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146241 4814 flags.go:64] FLAG: --log-flush-frequency="5s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146250 4814 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146263 4814 flags.go:64] FLAG: --log-json-split-stream="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146272 4814 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146280 4814 flags.go:64] FLAG: --log-text-split-stream="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146290 4814 flags.go:64] FLAG: --logging-format="text"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146299 4814 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146309 4814 flags.go:64] FLAG: --make-iptables-util-chains="true"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146318 4814 flags.go:64] FLAG: --manifest-url=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146327 4814 flags.go:64] FLAG: --manifest-url-header=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146339 4814 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146348 4814 flags.go:64] FLAG: --max-open-files="1000000"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146360 4814 flags.go:64] FLAG: --max-pods="110"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146369 4814 flags.go:64] FLAG: --maximum-dead-containers="-1"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146378 4814 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146388 4814 flags.go:64] FLAG: --memory-manager-policy="None"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146397 4814 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146407 4814 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146416 4814 flags.go:64] FLAG: --node-ip="192.168.126.11"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146425 4814 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146445 4814 flags.go:64] FLAG: --node-status-max-images="50"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146454 4814 flags.go:64] FLAG: --node-status-update-frequency="10s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146464 4814 flags.go:64] FLAG: --oom-score-adj="-999"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146473 4814 flags.go:64] FLAG: --pod-cidr=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146481 4814 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146496 4814 flags.go:64] FLAG: --pod-manifest-path=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146505 4814 flags.go:64] FLAG: --pod-max-pids="-1"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146514 4814 flags.go:64] FLAG: --pods-per-core="0"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146522 4814 flags.go:64] FLAG: --port="10250"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146532 4814 flags.go:64] FLAG: --protect-kernel-defaults="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146541 4814 flags.go:64] FLAG: --provider-id=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146549 4814 flags.go:64] FLAG: --qos-reserved=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146558 4814 flags.go:64] FLAG: --read-only-port="10255"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146567 4814 flags.go:64] FLAG: --register-node="true"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146576 4814 flags.go:64] FLAG: --register-schedulable="true"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146586 4814 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146606 4814 flags.go:64] FLAG: --registry-burst="10"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146615 4814 flags.go:64] FLAG: --registry-qps="5"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146624 4814 flags.go:64] FLAG: --reserved-cpus=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146659 4814 flags.go:64] FLAG: --reserved-memory=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146671 4814 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146680 4814 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146689 4814 flags.go:64] FLAG: --rotate-certificates="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146698 4814 flags.go:64] FLAG: --rotate-server-certificates="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146708 4814 flags.go:64] FLAG: --runonce="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146717 4814 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146726 4814 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146735 4814 flags.go:64] FLAG: --seccomp-default="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146744 4814 flags.go:64] FLAG: --serialize-image-pulls="true"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146753 4814 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146762 4814 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146771 4814 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146781 4814 flags.go:64] FLAG: --storage-driver-password="root"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146790 4814 flags.go:64] FLAG: --storage-driver-secure="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146799 4814 flags.go:64] FLAG: --storage-driver-table="stats"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146808 4814 flags.go:64] FLAG: --storage-driver-user="root"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146817 4814 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146826 4814 flags.go:64] FLAG: --sync-frequency="1m0s"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146838 4814 flags.go:64] FLAG: --system-cgroups=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146847 4814 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146862 4814 flags.go:64] FLAG: --system-reserved-cgroup=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146871 4814 flags.go:64] FLAG: --tls-cert-file=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146880 4814 flags.go:64] FLAG: --tls-cipher-suites="[]"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146893 4814 flags.go:64] FLAG: --tls-min-version=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146902 4814 flags.go:64] FLAG: --tls-private-key-file=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146910 4814 flags.go:64] FLAG: --topology-manager-policy="none"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146919 4814 flags.go:64] FLAG: --topology-manager-policy-options=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146928 4814 flags.go:64] FLAG: --topology-manager-scope="container"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146937 4814 flags.go:64] FLAG: --v="2"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146949 4814 flags.go:64] FLAG: --version="false"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146960 4814 flags.go:64] FLAG: --vmodule=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146972 4814 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.146993 4814 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147241 4814 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147253 4814 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147262 4814 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147272 4814 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147280 4814 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147289 4814 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147297 4814 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147305 4814 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147313 4814 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147320 4814 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147328 4814 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147344 4814 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147352 4814 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147360 4814 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147368 4814 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147375 4814 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147383 4814 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147391 4814 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147399 4814 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147407 4814 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147415 4814 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147422 4814 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147430 4814 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147438 4814 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147446 4814 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147455 4814 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147465 4814 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147473 4814 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147481 4814 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147489 4814 feature_gate.go:330] unrecognized feature gate: Example
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147497 4814 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147505 4814 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147513 4814 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147521 4814 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147530 4814 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147538 4814 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147546 4814 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147556 4814 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147568 4814 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147577 4814 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147585 4814 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147593 4814 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147601 4814 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147609 4814 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147617 4814 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147648 4814 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147659 4814 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147668 4814 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147678 4814 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147687 4814 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147695 4814 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147703 4814 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147712 4814 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147721 4814 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147729 4814 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147737 4814 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147745 4814 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147753 4814 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147774 4814 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147782 4814 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147789 4814 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147797 4814 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147805 4814 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147813 4814 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147821 4814 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147831 4814 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147840 4814 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147848 4814 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147857 4814 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147879 4814 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.147890 4814 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.147913 4814 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.157471 4814 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.157533 4814 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157734 4814 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157781 4814 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157793 4814 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157804 4814 feature_gate.go:330] unrecognized feature gate: Example
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157814 4814 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157824 4814 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157832 4814 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157842 4814 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157850 4814 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157861 4814 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157872 4814 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157886 4814 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157895 4814 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157903 4814 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157912 4814 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157920 4814 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157928 4814 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157935 4814 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157943 4814 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157952 4814 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157960 4814 feature_gate.go:330] unrecognized feature gate: PlatformOperators Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157967 4814 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157975 4814 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157984 4814 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.157992 4814 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158000 4814 feature_gate.go:330] unrecognized feature gate: OVNObservability Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158007 4814 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158015 4814 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158023 4814 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158034 4814 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158042 4814 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158050 4814 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158057 4814 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158065 4814 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158074 4814 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158083 4814 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158091 4814 feature_gate.go:330] unrecognized feature gate: 
MachineAPIOperatorDisableMachineHealthCheckController Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158098 4814 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158106 4814 feature_gate.go:330] unrecognized feature gate: PinnedImages Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158114 4814 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158122 4814 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158130 4814 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158138 4814 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158146 4814 feature_gate.go:330] unrecognized feature gate: NewOLM Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158153 4814 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158161 4814 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158169 4814 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158177 4814 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158187 4814 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158196 4814 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158207 4814 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158219 4814 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158228 4814 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158237 4814 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158245 4814 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158253 4814 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158261 4814 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158270 4814 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158278 4814 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158288 4814 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158296 4814 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158305 4814 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158313 4814 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158320 4814 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158328 4814 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158336 4814 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158344 4814 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158352 4814 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158360 4814 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158367 4814 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158376 4814 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.158389 4814 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158620 4814 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158663 4814 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158671 4814 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158680 4814 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158690 4814 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158698 4814 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158706 4814 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158715 4814 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158725 4814 feature_gate.go:330] unrecognized feature gate: Example
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158734 4814 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158742 4814 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158751 4814 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158759 4814 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158767 4814 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158776 4814 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158786 4814 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158797 4814 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158806 4814 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158816 4814 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158824 4814 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158832 4814 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158840 4814 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158849 4814 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158857 4814 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158864 4814 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158873 4814 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158880 4814 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158888 4814 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158896 4814 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158904 4814 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158912 4814 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158919 4814 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158927 4814 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158936 4814 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158945 4814 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158953 4814 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158961 4814 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158968 4814 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158976 4814 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158984 4814 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.158993 4814 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159000 4814 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159008 4814 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159016 4814 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159024 4814 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159034 4814 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159043 4814 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159053 4814 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159062 4814 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159071 4814 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159079 4814 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159087 4814 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159096 4814 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159106 4814 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159117 4814 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159125 4814 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159134 4814 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159142 4814 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159150 4814 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159157 4814 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159165 4814 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159173 4814 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159180 4814 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159190 4814 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159200 4814 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159208 4814 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159217 4814 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159226 4814 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159235 4814 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159243 4814 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.159252 4814 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.159265 4814 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.159547 4814 server.go:940] "Client rotation is on, will bootstrap in background"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.163827 4814 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.163969 4814 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.164925 4814 server.go:997] "Starting client certificate rotation"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.164966 4814 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.165455 4814 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-22 21:30:45.507228853 +0000 UTC
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.165572 4814 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.173729 4814 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.174187 4814 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.176257 4814 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.189234 4814 log.go:25] "Validated CRI v1 runtime API"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.210550 4814 log.go:25] "Validated CRI v1 image API"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.217657 4814 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.221722 4814 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-22-05-13-07-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.221801 4814 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:41 fsType:tmpfs blockSize:0}]
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.241971 4814 manager.go:217] Machine: {Timestamp:2026-01-22 05:18:34.240376388 +0000 UTC m=+0.323864663 CPUVendorID:AuthenticAMD NumCores:8 NumPhysicalCores:1 NumSockets:8 CpuFrequency:2800000 MemoryCapacity:25199480832 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:aaa9af76-19bc-4fd1-8c88-46d65f8fe036 BootID:9001f652-05f0-41c2-9b56-281608fe470d Filesystems:[{Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:12599742464 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:2519945216 Type:vfs Inodes:615221 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:41 Capacity:1073741824 Type:vfs Inodes:3076108 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:12599738368 Type:vfs Inodes:3076108 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:5039898624 Type:vfs Inodes:819200 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:429496729600 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:2b:d7:ff Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:2b:d7:ff Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:bb:c5:36 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:ca:86:90 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:e7:75:a1 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:90:03:7c Speed:-1 Mtu:1496} {Name:eth10 MacAddress:d2:ca:68:66:8e:73 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:9a:d9:e3:b3:17:18 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:25199480832 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.242316 4814 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.242617 4814 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.243273 4814 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.243567 4814 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.243654 4814 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.244025 4814 topology_manager.go:138] "Creating topology manager with none policy"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.244047 4814 container_manager_linux.go:303] "Creating device plugin manager"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.244354 4814 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.244407 4814 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.244816 4814 state_mem.go:36] "Initialized new in-memory state store"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.244953 4814 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.245831 4814 kubelet.go:418] "Attempting to sync node with API server"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.245867 4814 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.245912 4814 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.245940 4814 kubelet.go:324] "Adding apiserver pod source"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.245963 4814 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.248348 4814 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.249197 4814 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.251129 4814 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.251919 4814 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused
Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.252063 4814 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError"
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.252705 4814 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused
Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.252829 4814 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.252957 4814 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.253002 4814 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.253020 4814 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.253035 4814 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.253059 4814 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.253075 4814 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.253090 4814 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.253114 4814 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.253134 4814 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.253150 4814 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.253172 4814 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.253187 4814 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.253824 4814 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.254695 4814 server.go:1280] "Started kubelet"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.256469 4814 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.256536 4814 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.256765 4814 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 22 05:18:34 crc systemd[1]: Started Kubernetes Kubelet.
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.257430 4814 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.261418 4814 server.go:460] "Adding debug handlers to kubelet server"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.261496 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.261910 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 10:45:24.433608271 +0000 UTC
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.262536 4814 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.266432 4814 volume_manager.go:287] "The desired_state_of_world populator starts"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.266771 4814 volume_manager.go:289] "Starting Kubelet Volume Manager"
Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.267023 4814 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.268360 4814 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.268028 4814 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.110:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188cf5e43bd040e3 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-22 05:18:34.254614755 +0000 UTC m=+0.338103010,LastTimestamp:2026-01-22 05:18:34.254614755 +0000 UTC m=+0.338103010,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.273143 4814 factory.go:55] Registering systemd factory
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.273184 4814 factory.go:221] Registration of the systemd container factory successfully
Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.274324 4814 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="200ms"
Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.274485 4814 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused
Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.274669 4814 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError"
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.274840 4814 factory.go:153] Registering CRI-O factory
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.274873 4814 factory.go:221] Registration of the crio container factory successfully
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.275013 4814 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.275056 4814 factory.go:103] Registering Raw factory
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.275090 4814 manager.go:1196] Started watching for new ooms in manager
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.276310 4814 manager.go:319] Starting recovery of all containers
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284475 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284553 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284578 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284601 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284623 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284755 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284774 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284796 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284823 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284845 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284865 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284888 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284910 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284936 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284959 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.284982 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285003 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285024 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285043 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285064 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285087 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285166 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285268 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285303 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285328 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285354 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285387 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285419 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285446 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285469 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285490 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285511 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285530 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285548 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285567 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285586 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285606 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285654 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285673 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285692 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285711 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285730 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285748 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285769 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285788 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285808 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285858 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285880 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285902 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285925 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285944 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.285966 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286010 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286034 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286054 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286077 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286096 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286115 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286134 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286153 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286174 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286199 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286219 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286239 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286258 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286277 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286297 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286317 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286336 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286355 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286374 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286394 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286413 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286434 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286455 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286475 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286498 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286517 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286535 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286555 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286577 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286605 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286650 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286671 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286692 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286712 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286732 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286755 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286775 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286796 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286817 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286838 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286858 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286879 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286898 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286919 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286939 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286959 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.286977 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287004 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287023 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287042 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287063 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28"
volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287082 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287109 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287132 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287155 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287176 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287197 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287219 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287240 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287262 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287285 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287306 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" 
volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287328 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287354 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287381 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287407 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287435 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287461 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287482 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287502 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287520 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287539 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287561 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287582 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287602 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287623 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287703 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287723 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287743 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287763 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287782 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287801 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287822 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287843 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287864 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287885 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287904 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287925 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287944 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287964 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.287983 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288005 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288029 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288047 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288066 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" 
volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288086 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288106 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288126 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288144 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288165 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288184 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288203 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288224 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288244 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288264 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288282 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" 
volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288304 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288324 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288343 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288361 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288382 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288402 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288421 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288441 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288460 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288479 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288499 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288520 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288537 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288556 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288576 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288593 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288611 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288680 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288700 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288718 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288735 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288755 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288775 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288794 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288813 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288832 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288851 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.288870 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290122 4814 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290196 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290235 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290263 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290290 4814 reconstruct.go:130] "Volume is marked as uncertain and added 
into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290319 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290341 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290362 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290383 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290403 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290422 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290443 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290462 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290481 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290500 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290519 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290540 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290558 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290578 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290598 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290617 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290674 4814 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290692 4814 reconstruct.go:97] "Volume reconstruction finished" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.290704 4814 reconciler.go:26] "Reconciler: start to sync state" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.306556 4814 manager.go:324] Recovery completed Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.325743 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.337244 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.337323 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.337350 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.338448 4814 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.338475 4814 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.338505 4814 state_mem.go:36] "Initialized new in-memory state store" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.339623 4814 
kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.342332 4814 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.342392 4814 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.342438 4814 kubelet.go:2335] "Starting kubelet main sync loop" Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.342497 4814 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.343297 4814 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.343377 4814 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.357530 4814 policy_none.go:49] "None policy: Start" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.359604 4814 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.359659 4814 state_mem.go:35] "Initializing new in-memory state store" Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.367783 4814 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.410859 4814 manager.go:334] "Starting Device Plugin manager" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.410998 4814 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.411011 4814 server.go:79] "Starting device plugin registration server" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.411421 4814 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.411433 4814 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.411777 4814 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.411911 4814 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.411924 4814 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.424073 4814 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.442786 4814 kubelet.go:2421] "SyncLoop ADD" source="file" 
pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.442887 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.443958 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.444035 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.444050 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.444353 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.444489 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.444545 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.445960 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.446002 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.446023 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.446117 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.446153 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.446170 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.446495 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.447180 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.448781 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.448856 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.448871 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.448926 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.449052 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.449232 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.449288 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.451391 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.451444 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.451443 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.451476 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.451488 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.451488 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.451523 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.451540 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.451456 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.451661 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.451660 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.451909 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.452269 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.452306 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.452315 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.452536 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.452561 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.453396 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.453454 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.453463 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.453735 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.453752 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.453760 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.475286 4814 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="400ms" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498404 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498465 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498519 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498551 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498585 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498658 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498690 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498723 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498753 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498783 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498816 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498847 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc 
kubenswrapper[4814]: I0122 05:18:34.498875 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498905 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.498981 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.511532 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.513203 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.513249 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.513296 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.513328 4814 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.514165 4814 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.110:6443: connect: connection refused" node="crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.600474 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.600814 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.600909 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.600950 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: 
\"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601037 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601111 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601079 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601144 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601204 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601260 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601207 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601234 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601384 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601392 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601519 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601590 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601561 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601663 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601688 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601670 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601753 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601814 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601853 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601913 4814 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.601957 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.602018 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.602028 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.602110 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.602173 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.602274 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.714687 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.716723 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.716766 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.716793 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.716838 4814 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.717322 4814 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.110:6443: connect: connection 
refused" node="crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.788303 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.817413 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.822587 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-c6322196c69c595016716e3b92571b56408fc86485331c362e5620e50652a3fd WatchSource:0}: Error finding container c6322196c69c595016716e3b92571b56408fc86485331c362e5620e50652a3fd: Status 404 returned error can't find the container with id c6322196c69c595016716e3b92571b56408fc86485331c362e5620e50652a3fd Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.830438 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.851340 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-1b8c8b246b5163465be548b8b22d7907a5d4c74048670d83a696420d6e72fe1a WatchSource:0}: Error finding container 1b8c8b246b5163465be548b8b22d7907a5d4c74048670d83a696420d6e72fe1a: Status 404 returned error can't find the container with id 1b8c8b246b5163465be548b8b22d7907a5d4c74048670d83a696420d6e72fe1a Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.851900 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.854549 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-6d91867a04d3326f4159f4c4f0ebca153ca0bc2b3800de25c345f22cd7f053ab WatchSource:0}: Error finding container 6d91867a04d3326f4159f4c4f0ebca153ca0bc2b3800de25c345f22cd7f053ab: Status 404 returned error can't find the container with id 6d91867a04d3326f4159f4c4f0ebca153ca0bc2b3800de25c345f22cd7f053ab Jan 22 05:18:34 crc kubenswrapper[4814]: I0122 05:18:34.863384 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:34 crc kubenswrapper[4814]: E0122 05:18:34.876387 4814 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="800ms" Jan 22 05:18:34 crc kubenswrapper[4814]: W0122 05:18:34.876889 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-5f738c60f7966a2e6149fd36b0d5a582a889cc656e4d8b7540a3988c52e7ccb6 WatchSource:0}: Error finding container 5f738c60f7966a2e6149fd36b0d5a582a889cc656e4d8b7540a3988c52e7ccb6: Status 404 returned error can't find the container with id 5f738c60f7966a2e6149fd36b0d5a582a889cc656e4d8b7540a3988c52e7ccb6 Jan 22 05:18:35 crc kubenswrapper[4814]: W0122 05:18:35.101012 4814 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 22 05:18:35 crc kubenswrapper[4814]: E0122 05:18:35.101106 4814 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError" Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.118064 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.119899 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.119944 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.119959 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.119994 4814 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 05:18:35 crc kubenswrapper[4814]: E0122 05:18:35.120392 4814 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.110:6443: connect: connection refused" node="crc" Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.258281 4814 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.262447 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 13:11:37.607500245 +0000 UTC Jan 22 05:18:35 crc kubenswrapper[4814]: W0122 05:18:35.281662 4814 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get 
"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 22 05:18:35 crc kubenswrapper[4814]: E0122 05:18:35.281747 4814 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError" Jan 22 05:18:35 crc kubenswrapper[4814]: W0122 05:18:35.328236 4814 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 22 05:18:35 crc kubenswrapper[4814]: E0122 05:18:35.328315 4814 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError" Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.348387 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3f0446ca1087571f7017e691c05bd2922f51c6642cee288f4999bd0428bf34f2"} Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.349502 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"5f738c60f7966a2e6149fd36b0d5a582a889cc656e4d8b7540a3988c52e7ccb6"} Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.350414 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6d91867a04d3326f4159f4c4f0ebca153ca0bc2b3800de25c345f22cd7f053ab"} Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.351419 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1b8c8b246b5163465be548b8b22d7907a5d4c74048670d83a696420d6e72fe1a"} Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.352275 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"c6322196c69c595016716e3b92571b56408fc86485331c362e5620e50652a3fd"} Jan 22 05:18:35 crc kubenswrapper[4814]: W0122 05:18:35.355882 4814 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 22 05:18:35 crc kubenswrapper[4814]: E0122 05:18:35.355967 4814 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get 
\"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError" Jan 22 05:18:35 crc kubenswrapper[4814]: E0122 05:18:35.677979 4814 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="1.6s" Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.920525 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.921793 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.921837 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.921851 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:35 crc kubenswrapper[4814]: I0122 05:18:35.921885 4814 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 05:18:35 crc kubenswrapper[4814]: E0122 05:18:35.922395 4814 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.110:6443: connect: connection refused" node="crc" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.258207 4814 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.263173 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 09:52:14.132258947 +0000 UTC Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.335743 4814 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 22 05:18:36 crc kubenswrapper[4814]: E0122 05:18:36.337609 4814 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.110:6443: connect: connection refused" logger="UnhandledError" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.357148 4814 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="800b42dbf9d307e5c329fc25d90f26d545fa25f0774fc8949eafb714343355c8" exitCode=0 Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.357220 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"800b42dbf9d307e5c329fc25d90f26d545fa25f0774fc8949eafb714343355c8"} Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.359117 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b"} Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.359175 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5"} Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.360810 4814 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df" exitCode=0 Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.361005 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.360863 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df"} Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.366262 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.366475 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.366615 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.367065 4814 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="a8fc5a3cf363a1973ca5720720828e85d0116d2ed8e531c26f8d835a0cec4da1" exitCode=0 Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.367175 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"a8fc5a3cf363a1973ca5720720828e85d0116d2ed8e531c26f8d835a0cec4da1"} Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.367219 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.368673 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.368719 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.368735 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.374673 4814 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290" exitCode=0 Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.374719 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290"} Jan 22 05:18:36 crc 
kubenswrapper[4814]: I0122 05:18:36.374833 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.376377 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.376411 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.376423 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.379669 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.380448 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.380473 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:36 crc kubenswrapper[4814]: I0122 05:18:36.380483 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.257316 4814 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.110:6443: connect: connection refused Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.264137 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 14:17:27.677971951 +0000 UTC Jan 22 05:18:37 crc kubenswrapper[4814]: E0122 05:18:37.279331 4814 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="3.2s" Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.384162 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22"} Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.384206 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445"} Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.384219 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8"} Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.384228 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc"} Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.388997 4814 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f"} Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.389053 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6"} Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.389704 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.394137 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.394183 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.394196 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.407097 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"6f5fd93f04efc6d4ceb9a6483fe717567857eafa29470e1ca524baa91371d879"} Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.407148 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"2c4b1170828d5d0e88d3e60432cbf3887c2821eaff1c1540e7e5765d44417c73"} Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.407161 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"aba4edaa606d76148f3ab38f35ea09d933a2941ffbc1137e99a7a74f18aa43ab"} Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.407172 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.411038 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.411079 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.411092 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.413909 4814 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="a0b93b4d2de5fef338fee307076e0067fc78d3b9476a23e1d308b5cb7d90cdf2" exitCode=0 Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.413961 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"a0b93b4d2de5fef338fee307076e0067fc78d3b9476a23e1d308b5cb7d90cdf2"} Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.414020 4814 
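[Annotation] The "Generic (PLEG)" and "SyncLoop (PLEG)" entries come from the pod lifecycle event generator, which periodically relists container state from CRI-O and converts state transitions into ContainerStarted/ContainerDied events consumed by the sync loop; the ContainerDied events with exitCode=0 here are init containers finishing normally. A simplified Go sketch of that translation (the types are illustrative stand-ins for the kubelet's pleg package, not its exact API):

    // Turn an observed container state transition into a PLEG event.
    package main

    import "fmt"

    type eventType string

    const (
        containerStarted eventType = "ContainerStarted"
        containerDied    eventType = "ContainerDied"
    )

    type podLifecycleEvent struct {
        ID   string // pod UID, e.g. "2139d3e2895fc6797b9c76a1b4c9886d"
        Type eventType
        Data string // container or sandbox ID
    }

    func translate(oldRunning, nowRunning bool, podUID, containerID string) *podLifecycleEvent {
        switch {
        case !oldRunning && nowRunning:
            return &podLifecycleEvent{podUID, containerStarted, containerID}
        case oldRunning && !nowRunning:
            return &podLifecycleEvent{podUID, containerDied, containerID}
        }
        return nil // no state change, no event
    }

    func main() {
        fmt.Println(*translate(false, true, "2139d3e2...", "a2c0dd8b..."))
    }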
Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.414020 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.414046 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.414949 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.414977 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.414988 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.415898 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.415926 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.415938 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.522791 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.526551 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.526593 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.526603 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:37 crc kubenswrapper[4814]: I0122 05:18:37.526654 4814 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.264444 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 14:04:23.110194412 +0000 UTC
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.420536 4814 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f74b0b953a158b9218203487e86cbc43de48f7d4a8af8d52574d447f8df7f6cc" exitCode=0
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.420617 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f74b0b953a158b9218203487e86cbc43de48f7d4a8af8d52574d447f8df7f6cc"}
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.420718 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.422117 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.422171 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.422190 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.427225 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832"}
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.427297 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.428733 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.428776 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.428793 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.430841 4814 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.430898 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.431498 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"cff7a5a68e2e3e125fb9198ce7dcf1b2c7470941e9bfe4206439e2c21f409f4a"}
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.431589 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.432220 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.433216 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.433252 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.433284 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.433301 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.433256 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.433369 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.433261 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.433503 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:38 crc kubenswrapper[4814]: I0122 05:18:38.433521 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.178041 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.266310 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 06:05:45.681297927 +0000 UTC
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.323356 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.438232 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a2c0dd8b1278b2430af8f7379df8f100a535e6d3ec5a710ec363e331b40b3a4b"}
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.438307 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"fe52e56bcd1d30368e34d5a587682eb161d41c44e0787420e3ce1b1529554ffc"}
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.438338 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c8ace30af3c2e1ddb9af85c8329ecc7aa69d3ff5a146c7c5aea8aa577c456edc"}
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.438405 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.438472 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.438566 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.440093 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.440161 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.440187 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.441545 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.441622 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.441735 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.707787 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.708040 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
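[Annotation] The "SyncLoop (probe)" entries trace each static pod's probes: startup probes flip from unhealthy to started, and readiness probes move from unknown (status="") to ready as the containers come up. A sketch of the underlying HTTP probe semantics, where a timeout or non-2xx/3xx response counts as a failed attempt (the URL below is taken from the cluster-policy-controller probe seen later in the log; treat the rest as an illustrative assumption, noting that the kubelet's HTTPS probes skip certificate verification):

    // Minimal HTTP probe: GET with a short timeout, mapping the outcome
    // to the success/failure results recorded by prober.go.
    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"
        "time"
    )

    func probe(url string) string {
        client := &http.Client{
            Timeout: time.Second,
            // Kubelet HTTPS probes do not verify the serving certificate.
            Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
        }
        resp, err := client.Get(url)
        if err != nil {
            return fmt.Sprintf("failure: %v", err) // e.g. context deadline exceeded
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Sprintf("failure: HTTP probe failed with statuscode: %d", resp.StatusCode)
        }
        return "success"
    }

    func main() {
        fmt.Println(probe("https://192.168.126.11:10357/healthz"))
    }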
node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.709567 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:39 crc kubenswrapper[4814]: I0122 05:18:39.709587 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:40 crc kubenswrapper[4814]: I0122 05:18:40.267255 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 08:31:37.887174496 +0000 UTC Jan 22 05:18:40 crc kubenswrapper[4814]: I0122 05:18:40.446621 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d95859d5d070454156eaaaee509edb3d437c785345000f46c62f1f97bdea6031"} Jan 22 05:18:40 crc kubenswrapper[4814]: I0122 05:18:40.446745 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"0fadd2efd9681119b86d9bee5366c8b5c70f3675595af1a5a81061c81cba4e6d"} Jan 22 05:18:40 crc kubenswrapper[4814]: I0122 05:18:40.446694 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:40 crc kubenswrapper[4814]: I0122 05:18:40.446694 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:40 crc kubenswrapper[4814]: I0122 05:18:40.448055 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:40 crc kubenswrapper[4814]: I0122 05:18:40.448129 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:40 crc kubenswrapper[4814]: I0122 05:18:40.448151 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:40 crc kubenswrapper[4814]: I0122 05:18:40.448600 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:40 crc kubenswrapper[4814]: I0122 05:18:40.448659 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:40 crc kubenswrapper[4814]: I0122 05:18:40.448676 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:40 crc kubenswrapper[4814]: I0122 05:18:40.541411 4814 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 22 05:18:41 crc kubenswrapper[4814]: I0122 05:18:41.267877 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 18:22:11.965967338 +0000 UTC Jan 22 05:18:41 crc kubenswrapper[4814]: I0122 05:18:41.404291 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:41 crc kubenswrapper[4814]: I0122 05:18:41.404508 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:41 crc kubenswrapper[4814]: I0122 05:18:41.406049 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:41 crc 
kubenswrapper[4814]: I0122 05:18:41.406105 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:41 crc kubenswrapper[4814]: I0122 05:18:41.406124 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:41 crc kubenswrapper[4814]: I0122 05:18:41.449086 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:41 crc kubenswrapper[4814]: I0122 05:18:41.450350 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:41 crc kubenswrapper[4814]: I0122 05:18:41.450397 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:41 crc kubenswrapper[4814]: I0122 05:18:41.450418 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:42 crc kubenswrapper[4814]: I0122 05:18:42.179088 4814 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 22 05:18:42 crc kubenswrapper[4814]: I0122 05:18:42.179197 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 05:18:42 crc kubenswrapper[4814]: I0122 05:18:42.268312 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 05:03:34.918423504 +0000 UTC Jan 22 05:18:42 crc kubenswrapper[4814]: I0122 05:18:42.727299 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:18:42 crc kubenswrapper[4814]: I0122 05:18:42.727677 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:42 crc kubenswrapper[4814]: I0122 05:18:42.729433 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:42 crc kubenswrapper[4814]: I0122 05:18:42.729491 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:42 crc kubenswrapper[4814]: I0122 05:18:42.729509 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:42 crc kubenswrapper[4814]: I0122 05:18:42.802493 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 22 05:18:42 crc kubenswrapper[4814]: I0122 05:18:42.802719 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:42 crc kubenswrapper[4814]: I0122 05:18:42.804345 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:42 crc kubenswrapper[4814]: I0122 05:18:42.804409 4814 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:42 crc kubenswrapper[4814]: I0122 05:18:42.804428 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:43 crc kubenswrapper[4814]: I0122 05:18:43.268854 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 09:16:18.269984603 +0000 UTC Jan 22 05:18:43 crc kubenswrapper[4814]: I0122 05:18:43.866978 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:43 crc kubenswrapper[4814]: I0122 05:18:43.867203 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:43 crc kubenswrapper[4814]: I0122 05:18:43.869123 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:43 crc kubenswrapper[4814]: I0122 05:18:43.869172 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:43 crc kubenswrapper[4814]: I0122 05:18:43.869771 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:43 crc kubenswrapper[4814]: I0122 05:18:43.875602 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:44 crc kubenswrapper[4814]: I0122 05:18:44.269407 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 06:59:52.602820802 +0000 UTC Jan 22 05:18:44 crc kubenswrapper[4814]: E0122 05:18:44.424229 4814 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 22 05:18:44 crc kubenswrapper[4814]: I0122 05:18:44.458136 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:44 crc kubenswrapper[4814]: I0122 05:18:44.459354 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:44 crc kubenswrapper[4814]: I0122 05:18:44.459408 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:44 crc kubenswrapper[4814]: I0122 05:18:44.459442 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:45 crc kubenswrapper[4814]: I0122 05:18:45.270578 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 13:26:11.963614756 +0000 UTC Jan 22 05:18:46 crc kubenswrapper[4814]: I0122 05:18:46.271755 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 02:25:37.264269995 +0000 UTC Jan 22 05:18:46 crc kubenswrapper[4814]: I0122 05:18:46.686018 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:46 crc kubenswrapper[4814]: I0122 05:18:46.686261 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:46 crc 
Jan 22 05:18:46 crc kubenswrapper[4814]: I0122 05:18:46.688249 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:46 crc kubenswrapper[4814]: I0122 05:18:46.688269 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:46 crc kubenswrapper[4814]: I0122 05:18:46.693340 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 22 05:18:47 crc kubenswrapper[4814]: I0122 05:18:47.272231 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 13:29:55.844921873 +0000 UTC
Jan 22 05:18:47 crc kubenswrapper[4814]: I0122 05:18:47.468108 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:47 crc kubenswrapper[4814]: I0122 05:18:47.469096 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:47 crc kubenswrapper[4814]: I0122 05:18:47.469166 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:47 crc kubenswrapper[4814]: I0122 05:18:47.469191 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:47 crc kubenswrapper[4814]: W0122 05:18:47.519893 4814 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Jan 22 05:18:47 crc kubenswrapper[4814]: I0122 05:18:47.519990 4814 trace.go:236] Trace[1978920793]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Jan-2026 05:18:37.517) (total time: 10002ms):
Jan 22 05:18:47 crc kubenswrapper[4814]: Trace[1978920793]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10002ms (05:18:47.519)
Jan 22 05:18:47 crc kubenswrapper[4814]: Trace[1978920793]: [10.002511201s] [10.002511201s] END
Jan 22 05:18:47 crc kubenswrapper[4814]: E0122 05:18:47.520015 4814 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Jan 22 05:18:47 crc kubenswrapper[4814]: E0122 05:18:47.527457 4814 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc"
Jan 22 05:18:47 crc kubenswrapper[4814]: W0122 05:18:47.927537 4814 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Jan 22 05:18:47 crc kubenswrapper[4814]: I0122 05:18:47.927697 4814 trace.go:236] Trace[509352175]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Jan-2026 05:18:37.925) (total time: 10002ms):
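Both reflector failures above end after almost exactly 10 seconds (10002ms) with "net/http: TLS handshake timeout". That error text and duration match Go's Transport.TLSHandshakeTimeout, which defaults to 10s: the TCP connection to api-int.crc.testing:6443 was accepted, but the TLS handshake never completed because the apiserver was still starting. The reflector logs the failure and retries with backoff, which is why the same pattern repeats per resource type. A sketch of the client-side knob involved (the URL is taken from the log; actually reaching it from outside the node is not assumed):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	transport := &http.Transport{
		DialContext: (&net.Dialer{Timeout: 5 * time.Second}).DialContext,
		// net/http's default; the log's ~10.002s failures sit right at this boundary.
		TLSHandshakeTimeout: 10 * time.Second,
		TLSClientConfig:     &tls.Config{MinVersion: tls.VersionTLS12},
	}
	client := &http.Client{Transport: transport}
	resp, err := client.Get("https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500")
	if err != nil {
		fmt.Println(err) // a stalled handshake yields: net/http: TLS handshake timeout
		return
	}
	resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```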
Jan 22 05:18:47 crc kubenswrapper[4814]: Trace[509352175]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (05:18:47.927)
Jan 22 05:18:47 crc kubenswrapper[4814]: Trace[509352175]: [10.002087325s] [10.002087325s] END
Jan 22 05:18:47 crc kubenswrapper[4814]: E0122 05:18:47.927728 4814 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.009247 4814 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.009312 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.022528 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.022698 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.023930 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.023966 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.023978 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.052725 4814 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.052790 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.080981 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.273053 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 19:01:07.387452084 +0000 UTC
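The 403 above actually marks progress: the apiserver is now up far enough to authenticate requests, but the kubelet's HTTP prober sends no credentials, so the probe arrives as system:anonymous and /livez rejects it until anonymous access to that path is permitted. A sketch of the same unauthenticated call (the URL is from the log; InsecureSkipVerify and the hand-rolled status struct are illustration shortcuts, not how the prober is implemented):

```go
package main

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net/http"
)

// Minimal mirror of the Status body the apiserver returns on 403.
type status struct {
	Message string `json:"message"`
	Reason  string `json:"reason"`
	Code    int    `json:"code"`
}

func main() {
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // sketch only
	}}
	resp, err := client.Get("https://api-int.crc.testing:6443/livez") // no Authorization header
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	var s status
	_ = json.NewDecoder(resp.Body).Decode(&s)
	fmt.Println(resp.StatusCode, s.Reason, "-", s.Message)
	// Expected while anonymous access is denied:
	// 403 Forbidden - forbidden: User "system:anonymous" cannot get path "/livez"
}
```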
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.470515 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.472057 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.472119 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.472141 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:48 crc kubenswrapper[4814]: I0122 05:18:48.489297 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.273976 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 07:05:35.791562457 +0000 UTC
Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.331714 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.331909 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.332415 4814 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.332482 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.332998 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.333027 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.333037 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.335071 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.473114 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.473162 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.473671 4814 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
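"connect: connection refused" is a different failure mode from the timeouts earlier in the log: the peer was reachable and responded immediately, but nothing was listening on 192.168.126.11:17697 yet, so the kernel rejected the connection outright. The check-endpoints container simply had not bound its socket. A tiny sketch of the distinction (loopback address assumed; use any port with no listener):

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// A closed port fails fast with ECONNREFUSED, unlike a slow or
	// unreachable endpoint, which burns the whole timeout instead.
	conn, err := net.DialTimeout("tcp", "127.0.0.1:17697", 2*time.Second)
	if err != nil {
		fmt.Println(err) // typically: dial tcp 127.0.0.1:17697: connect: connection refused
		return
	}
	conn.Close()
	fmt.Println("something is listening after all")
}
```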
\"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.473766 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.476829 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.476930 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.476961 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.479216 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.479291 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:49 crc kubenswrapper[4814]: I0122 05:18:49.479313 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:50 crc kubenswrapper[4814]: I0122 05:18:50.274613 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 13:21:11.56751208 +0000 UTC Jan 22 05:18:50 crc kubenswrapper[4814]: I0122 05:18:50.728465 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:50 crc kubenswrapper[4814]: I0122 05:18:50.730309 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:50 crc kubenswrapper[4814]: I0122 05:18:50.730370 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:50 crc kubenswrapper[4814]: I0122 05:18:50.730389 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:50 crc kubenswrapper[4814]: I0122 05:18:50.730424 4814 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 05:18:50 crc kubenswrapper[4814]: E0122 05:18:50.736794 4814 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 22 05:18:51 crc kubenswrapper[4814]: I0122 05:18:51.275141 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 07:19:31.49409914 +0000 UTC Jan 22 05:18:52 crc kubenswrapper[4814]: I0122 05:18:52.178859 4814 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 22 05:18:52 crc 
Jan 22 05:18:52 crc kubenswrapper[4814]: I0122 05:18:52.276326 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 03:37:37.678778174 +0000 UTC
Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.009424 4814 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.012689 4814 trace.go:236] Trace[652868816]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Jan-2026 05:18:38.037) (total time: 14975ms):
Jan 22 05:18:53 crc kubenswrapper[4814]: Trace[652868816]: ---"Objects listed" error: 14975ms (05:18:53.012)
Jan 22 05:18:53 crc kubenswrapper[4814]: Trace[652868816]: [14.975219154s] [14.975219154s] END
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.012718 4814 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.013005 4814 trace.go:236] Trace[652652560]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Jan-2026 05:18:38.293) (total time: 14719ms):
Jan 22 05:18:53 crc kubenswrapper[4814]: Trace[652652560]: ---"Objects listed" error: 14719ms (05:18:53.012)
Jan 22 05:18:53 crc kubenswrapper[4814]: Trace[652652560]: [14.719720701s] [14.719720701s] END
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.013042 4814 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.015680 4814 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.063451 4814 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.258523 4814 apiserver.go:52] "Watching apiserver"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.261407 4814 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.261787 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"]
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.262310 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
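The lease error above is the node heartbeat: the kubelet keeps a Lease named after the node in the kube-node-lease namespace, renewing it on a short interval; here even the initial GET timed out against the not-yet-ready apiserver, so the controller schedules a retry ("interval=6.4s"). A client-go sketch of the ensure step (the kubeconfig path is assumed; 40 seconds is the kubelet's default lease duration):

```go
package main

import (
	"context"
	"fmt"

	coordinationv1 "k8s.io/api/coordination/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func ensureLease(cs kubernetes.Interface, node string) error {
	leases := cs.CoordinationV1().Leases("kube-node-lease")
	if _, err := leases.Get(context.TODO(), node, metav1.GetOptions{}); err == nil {
		return nil // lease exists; a real kubelet would renew RenewTime here
	} else if !apierrors.IsNotFound(err) {
		return err // e.g. the context-deadline error in the log; caller retries later
	}
	holder, duration := node, int32(40)
	now := metav1.NowMicro()
	_, err := leases.Create(context.TODO(), &coordinationv1.Lease{
		ObjectMeta: metav1.ObjectMeta{Name: node, Namespace: "kube-node-lease"},
		Spec: coordinationv1.LeaseSpec{
			HolderIdentity:       &holder,
			LeaseDurationSeconds: &duration,
			RenewTime:            &now,
		},
	}, metav1.CreateOptions{})
	return err
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig") // assumed path
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println("ensure lease:", ensureLease(cs, "crc"))
}
```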
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.262380 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.262435 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.262713 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.262929 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.262964 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.262986 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.263023 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.263288 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.264517 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.264726 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.267980 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.268163 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.268699 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.269502 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.269652 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.269788 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.270026 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.274799 4814 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.277283 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 00:33:02.731314581 +0000 UTC Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317685 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317727 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317747 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317765 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: 
\"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317783 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317801 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317824 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317840 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317858 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317876 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317890 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317907 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317922 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317937 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 22 05:18:53 crc 
kubenswrapper[4814]: I0122 05:18:53.317952 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.317976 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318015 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318032 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318048 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318067 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318081 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318098 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318118 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318134 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: 
\"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318155 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318184 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318208 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318224 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318240 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318254 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318270 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318284 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318299 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318314 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" 
(UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318330 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318346 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318361 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318385 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318401 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318416 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318433 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318449 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318463 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318478 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod 
\"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318493 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318507 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318522 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318537 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318551 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318567 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318583 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318599 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318615 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318648 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod 
\"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318665 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318681 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318696 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318714 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318730 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318747 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318763 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318779 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318794 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318824 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" 
(UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318842 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318857 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318875 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318891 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318909 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318955 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318971 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.318986 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319003 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319019 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319034 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319052 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319067 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319082 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319096 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319112 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319127 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319142 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319159 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319174 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319190 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319205 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319219 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319235 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319250 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319265 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319281 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319297 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319312 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319328 4814 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319343 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319358 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319376 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319393 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319410 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319426 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319441 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319456 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319471 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319486 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319502 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319516 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319532 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319549 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319566 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319586 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319603 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319619 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319651 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319667 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319681 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319696 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319713 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319730 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319746 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319761 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319777 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319793 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319815 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319837 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319923 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319939 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319954 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319971 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.319986 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320004 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320019 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320035 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320068 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
\"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320087 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320103 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320119 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320138 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320156 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320173 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320191 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320207 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320222 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320237 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: 
\"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320253 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320269 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320285 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320303 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320321 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320336 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320352 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320386 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320402 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320418 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320434 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320450 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320468 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320484 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320499 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320515 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320531 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320547 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320564 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320581 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" 
(UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320597 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320614 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320649 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320667 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320684 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320701 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320718 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320733 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320751 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320767 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320785 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320807 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320830 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320848 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320864 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320882 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320900 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320916 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320933 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: 
\"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320949 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320967 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320983 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.320999 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321014 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321031 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321047 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321064 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321080 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321096 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod 
\"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321112 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321128 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321146 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321186 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321209 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321226 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321244 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321262 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321282 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: 
\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321301 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321320 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321339 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321356 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321374 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321390 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321407 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.321425 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.322362 
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.322557 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.322833 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.322942 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.322963 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.323194 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.323243 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.323556 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.323778 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.323871 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.323906 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.323979 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.324156 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.324175 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.324308 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.324418 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.324535 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.324677 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). 
InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.324736 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.324753 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.324920 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.324916 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325115 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325191 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325039 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325333 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). 
InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325344 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325394 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325417 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325505 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325523 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325606 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325709 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325806 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.325896 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:18:53.82577895 +0000 UTC m=+19.909267165 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.326066 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.326099 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.326160 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.326241 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.326707 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). 
InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.326913 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.327219 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.328462 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.329147 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.329570 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.329881 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.330213 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.330457 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.330792 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.330765 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.330885 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.331018 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.331182 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.331189 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325946 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.331282 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.331502 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.331529 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.332127 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.333306 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.333731 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.334022 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.334092 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.334187 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). 
InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.334337 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.334772 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.335037 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.335221 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.335599 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.335813 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.335896 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.336081 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). 
InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.336215 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.336523 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.336717 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.341837 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.342160 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.342341 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.342756 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.342867 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.343269 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.343287 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.343510 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.343740 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.344006 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.346877 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.347441 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.347486 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.347548 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.347805 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.347979 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.348286 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.348420 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.348695 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.348712 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.348796 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.348860 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.348939 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.348949 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.349023 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.349075 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.349162 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.349264 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.349306 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.349331 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.349393 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.349519 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.349923 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.349974 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.350075 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.350172 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.350388 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.367973 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.368087 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.368324 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.368401 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.368879 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.368931 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.369045 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.369176 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.325902 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.369394 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.369522 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.369830 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.369950 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.369712 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.371408 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.371534 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.371604 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.371968 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.372533 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.387003 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.387235 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.387477 4814 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.387551 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:18:53.887531494 +0000 UTC m=+19.971019699 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.387601 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.387819 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.387858 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.388208 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.388598 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.389098 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.389515 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.390051 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.390594 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.391805 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.391889 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.392085 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.393812 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.397161 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.398620 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.398872 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.398893 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.403001 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.403856 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.404215 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.404754 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.405085 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.405126 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.405172 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.405251 4814 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.405457 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.405667 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.405827 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.406137 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.406215 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.407581 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.408084 4814 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.408482 4814 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.408572 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:18:53.908553539 +0000 UTC m=+19.992041754 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.409697 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.410050 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.410370 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.411478 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.416488 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.416661 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.417303 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.420169 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.420161 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.421082 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.421427 4814 csr.go:261] certificate signing request csr-7b6x4 is approved, waiting to be issued
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.422476 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.422971 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.424861 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.425142 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.425175 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.425352 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.425618 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.425745 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.425779 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.425888 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.432836 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.432923 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.433072 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.433902 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.434259 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.434286 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.434304 4814 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.434483 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.434685 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.438436 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.438685 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.440971 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.441127 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.441220 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:18:53.941197715 +0000 UTC m=+20.024685930 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.441351 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.441430 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.441594 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.441730 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442301 4814 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442328 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442340 4814 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442351 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442364 4814 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442376 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442387 4814 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442396 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442409 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442419 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442427 4814 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442437 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442448 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442460 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442469 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442482 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442494 4814 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442504 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442513 4814 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442525 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442535 4814 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442544 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442554 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442566 4814 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442576 4814 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442585 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442595 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442607 4814 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.442617 4814 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447093 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.445086 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.445746 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447112 4814 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447183 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447199 4814 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447213 4814 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447229 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447243 4814 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447256 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447271 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447281 4814 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447292 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447302 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447314 4814 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447323 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447333 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447342 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447353 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447362 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447373 4814 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447383 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447397 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447409 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447421 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447433 4814 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447443 4814 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447453 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447463 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.445665 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447475 4814 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447484 4814 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447494 4814 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447506 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447522 4814 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447531 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447540 4814 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447552 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447563 4814 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447573 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447584 4814 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447596 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447604 4814 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447613 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.445179 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447639 4814 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447716 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447733 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447748 4814 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447767 4814 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447781 4814 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447795 4814 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447811 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447829 4814 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447845 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447862 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447879 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447892 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447908 4814 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447921 4814 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447939 4814 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447953 4814 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447968 4814 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.447983 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448003 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448019 4814 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448035 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448051 4814 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448091 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448105 4814 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448120 4814 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448136 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448150 4814 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448164 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448177 4814 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448194 4814 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448206 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448218 4814 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448231 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448249 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448269 4814 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448283 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448300 4814 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448314 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448327 4814 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448340 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448356 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448371 4814 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448384 4814 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448397 4814 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448414 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448430 4814 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448444 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448461 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448473 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448485 4814 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448498 4814 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448514 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448527 4814 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448538 4814 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448549 4814 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448565 4814 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448578 4814 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448590 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448601 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448616 4814 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448677 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448690 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448707 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448722 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448737 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448750 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448766 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448777 4814 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448789 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448802 4814 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448819 4814 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448833 4814 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448844 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448859 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448872 4814 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448884 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448895 4814 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448910 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448923 4814 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448934 4814 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448945 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448960 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448971 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448983 4814 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\""
Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.448993 4814
reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.449009 4814 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.449019 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.451349 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.453343 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.453480 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.457866 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.458130 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.458656 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.460294 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.460806 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.461114 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.461148 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.461169 4814 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.461253 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:18:53.961221289 +0000 UTC m=+20.044709504 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.474562 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.484801 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.510078 4814 csr.go:257] certificate signing request csr-7b6x4 is issued Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.520834 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.537120 4814 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.547709 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.550083 4814 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.550210 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.550275 4814 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc 
kubenswrapper[4814]: I0122 05:18:53.550332 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.550386 4814 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.550443 4814 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.550519 4814 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.550575 4814 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.550647 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.550704 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.550958 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551014 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551108 4814 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551174 4814 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551236 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551292 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551345 4814 
reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551405 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551459 4814 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551534 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551588 4814 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551660 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551716 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551779 4814 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551835 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551893 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.551952 4814 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.552007 4814 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.552094 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.552158 4814 
reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.552216 4814 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.552271 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.552361 4814 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.552419 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.552471 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.552531 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.559355 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-5gzfx"] Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.560299 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-5gzfx" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.562905 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.563107 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-vnl4q"] Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.563493 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-vnl4q" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.564405 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.564882 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.565345 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.568049 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.568052 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.568708 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.575422 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.584268 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.586848 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.595121 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.617256 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: W0122 05:18:53.641845 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-88efba6081afb4e931a491fa574d22953255c0fb5a09fd04441f5cf57dfd02ac WatchSource:0}: Error finding container 88efba6081afb4e931a491fa574d22953255c0fb5a09fd04441f5cf57dfd02ac: Status 404 returned error can't find the container with id 88efba6081afb4e931a491fa574d22953255c0fb5a09fd04441f5cf57dfd02ac Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.653209 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2887a737-4338-4fc7-a621-c4d9e74c05ca-host\") pod \"node-ca-5gzfx\" (UID: \"2887a737-4338-4fc7-a621-c4d9e74c05ca\") " pod="openshift-image-registry/node-ca-5gzfx" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.653263 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/2887a737-4338-4fc7-a621-c4d9e74c05ca-serviceca\") pod \"node-ca-5gzfx\" (UID: \"2887a737-4338-4fc7-a621-c4d9e74c05ca\") " pod="openshift-image-registry/node-ca-5gzfx" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.653295 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/1333dbf9-2055-429e-89b1-463b28cff79c-hosts-file\") pod \"node-resolver-vnl4q\" (UID: \"1333dbf9-2055-429e-89b1-463b28cff79c\") " pod="openshift-dns/node-resolver-vnl4q" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.653311 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgf4r\" (UniqueName: \"kubernetes.io/projected/1333dbf9-2055-429e-89b1-463b28cff79c-kube-api-access-qgf4r\") pod \"node-resolver-vnl4q\" (UID: \"1333dbf9-2055-429e-89b1-463b28cff79c\") " pod="openshift-dns/node-resolver-vnl4q" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.653335 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kqgt\" (UniqueName: \"kubernetes.io/projected/2887a737-4338-4fc7-a621-c4d9e74c05ca-kube-api-access-4kqgt\") pod \"node-ca-5gzfx\" (UID: \"2887a737-4338-4fc7-a621-c4d9e74c05ca\") " pod="openshift-image-registry/node-ca-5gzfx" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.653542 4814 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.703984 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.736829 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.754207 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/1333dbf9-2055-429e-89b1-463b28cff79c-hosts-file\") pod \"node-resolver-vnl4q\" (UID: \"1333dbf9-2055-429e-89b1-463b28cff79c\") " pod="openshift-dns/node-resolver-vnl4q" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.754615 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgf4r\" (UniqueName: \"kubernetes.io/projected/1333dbf9-2055-429e-89b1-463b28cff79c-kube-api-access-qgf4r\") pod \"node-resolver-vnl4q\" (UID: \"1333dbf9-2055-429e-89b1-463b28cff79c\") " pod="openshift-dns/node-resolver-vnl4q" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.754769 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kqgt\" (UniqueName: \"kubernetes.io/projected/2887a737-4338-4fc7-a621-c4d9e74c05ca-kube-api-access-4kqgt\") pod \"node-ca-5gzfx\" (UID: \"2887a737-4338-4fc7-a621-c4d9e74c05ca\") " pod="openshift-image-registry/node-ca-5gzfx" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.754910 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2887a737-4338-4fc7-a621-c4d9e74c05ca-host\") pod \"node-ca-5gzfx\" (UID: \"2887a737-4338-4fc7-a621-c4d9e74c05ca\") " pod="openshift-image-registry/node-ca-5gzfx" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.755012 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/2887a737-4338-4fc7-a621-c4d9e74c05ca-serviceca\") pod \"node-ca-5gzfx\" (UID: \"2887a737-4338-4fc7-a621-c4d9e74c05ca\") " pod="openshift-image-registry/node-ca-5gzfx" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.754581 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/1333dbf9-2055-429e-89b1-463b28cff79c-hosts-file\") pod \"node-resolver-vnl4q\" (UID: \"1333dbf9-2055-429e-89b1-463b28cff79c\") " pod="openshift-dns/node-resolver-vnl4q" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.755361 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2887a737-4338-4fc7-a621-c4d9e74c05ca-host\") pod \"node-ca-5gzfx\" (UID: \"2887a737-4338-4fc7-a621-c4d9e74c05ca\") " pod="openshift-image-registry/node-ca-5gzfx" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.756437 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"serviceca\" (UniqueName: \"kubernetes.io/configmap/2887a737-4338-4fc7-a621-c4d9e74c05ca-serviceca\") pod \"node-ca-5gzfx\" (UID: \"2887a737-4338-4fc7-a621-c4d9e74c05ca\") " pod="openshift-image-registry/node-ca-5gzfx" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.792421 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.809389 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgf4r\" (UniqueName: \"kubernetes.io/projected/1333dbf9-2055-429e-89b1-463b28cff79c-kube-api-access-qgf4r\") pod \"node-resolver-vnl4q\" (UID: \"1333dbf9-2055-429e-89b1-463b28cff79c\") " pod="openshift-dns/node-resolver-vnl4q" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.816076 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kqgt\" (UniqueName: \"kubernetes.io/projected/2887a737-4338-4fc7-a621-c4d9e74c05ca-kube-api-access-4kqgt\") pod \"node-ca-5gzfx\" (UID: \"2887a737-4338-4fc7-a621-c4d9e74c05ca\") " pod="openshift-image-registry/node-ca-5gzfx" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.829079 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.856007 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.856229 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:18:54.856211075 +0000 UTC m=+20.939699280 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.868946 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.873938 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-5gzfx" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.886868 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-vnl4q" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.922778 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.947225 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.957280 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.957340 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.957372 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.957497 4814 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.957510 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.957543 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 
05:18:53.957556 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:18:54.957541644 +0000 UTC m=+21.041029859 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.957556 4814 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.957598 4814 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.957638 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:18:54.957604746 +0000 UTC m=+21.041092961 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:53 crc kubenswrapper[4814]: E0122 05:18:53.957676 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:18:54.957666188 +0000 UTC m=+21.041154403 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.971118 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:53 crc kubenswrapper[4814]: I0122 05:18:53.989398 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.003735 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.018505 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.059192 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.059323 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.059362 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.059372 4814 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.059415 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:18:55.05940249 +0000 UTC m=+21.142890705 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.165980 4814 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.166162 4814 reflector.go:484] object-"openshift-network-operator"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-operator"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.166193 4814 reflector.go:484] object-"openshift-network-node-identity"/"ovnkube-identity-cm": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"ovnkube-identity-cm": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.166211 4814 reflector.go:484] object-"openshift-network-operator"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-operator"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.166234 4814 reflector.go:484] object-"openshift-dns"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-dns"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.166285 4814 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/events\": read tcp 38.102.83.110:43140->38.102.83.110:6443: use of closed network connection" event="&Event{ObjectMeta:{network-node-identity-vrzqb.188cf5e8cfc48af1 openshift-network-node-identity 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-network-node-identity,Name:network-node-identity-vrzqb,UID:ef543e1b-8068-4ea3-b32a-61027b32e95d,APIVersion:v1,ResourceVersion:25324,FieldPath:spec.containers{webhook},},Reason:Started,Message:Started container webhook,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-22 05:18:53.916744433 +0000 UTC m=+20.000232648,LastTimestamp:2026-01-22 05:18:53.916744433 +0000 UTC m=+20.000232648,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.166406 4814 reflector.go:484] object-"openshift-image-registry"/"image-registry-certificates": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"image-registry-certificates": Unexpected watch close - watch lasted less than a second and no items received Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.166426 4814 reflector.go:484] object-"openshift-image-registry"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very 
Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.167137 4814 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.RuntimeClass ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received
Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.167159 4814 reflector.go:484] object-"openshift-network-node-identity"/"env-overrides": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"env-overrides": Unexpected watch close - watch lasted less than a second and no items received
Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.167168 4814 reflector.go:484] object-"openshift-dns"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-dns"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received
Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.167150 4814 reflector.go:484] object-"openshift-network-node-identity"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received
Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.167195 4814 reflector.go:484] object-"openshift-network-operator"/"metrics-tls": watch of *v1.Secret ended with: very short watch: object-"openshift-network-operator"/"metrics-tls": Unexpected watch close - watch lasted less than a second and no items received
Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.167210 4814 reflector.go:484] object-"openshift-dns"/"node-resolver-dockercfg-kz9s7": watch of *v1.Secret ended with: very short watch: object-"openshift-dns"/"node-resolver-dockercfg-kz9s7": Unexpected watch close - watch lasted less than a second and no items received
Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.167218 4814 reflector.go:484] object-"openshift-network-operator"/"iptables-alerter-script": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-operator"/"iptables-alerter-script": Unexpected watch close - watch lasted less than a second and no items received
Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.167246 4814 reflector.go:484] object-"openshift-image-registry"/"node-ca-dockercfg-4777p": watch of *v1.Secret ended with: very short watch: object-"openshift-image-registry"/"node-ca-dockercfg-4777p": Unexpected watch close - watch lasted less than a second and no items received
Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.167248 4814 reflector.go:484] object-"openshift-network-node-identity"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received
Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.167249 4814 reflector.go:484] object-"openshift-image-registry"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received
Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.167323 4814 reflector.go:484] object-"openshift-network-node-identity"/"network-node-identity-cert": watch of *v1.Secret ended with: very short watch: object-"openshift-network-node-identity"/"network-node-identity-cert": Unexpected watch close - watch lasted less than a second and no items received
Jan 22 05:18:54 crc kubenswrapper[4814]: W0122 05:18:54.167349 4814 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.CSIDriver ended with: very short watch: k8s.io/client-go/informers/factory.go:160: Unexpected watch close - watch lasted less than a second and no items received
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.277964 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 02:12:55.218813149 +0000 UTC
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.343394 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.343593 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.347549 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.348081 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.348891 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.349457 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.349992 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.350446 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.352233 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.352985 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.354010 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.354545 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.355479 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.356175 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.357052 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.357609 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.360374 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.361042 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.362070 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.362499 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.363447 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.364126 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.365207 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.366712 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.367175 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.368222 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.368727 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.369848 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.370498 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.371433 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.372050 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.372976 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.373459 4814 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.373608 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.375523 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.378421 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.378938 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.380471 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.381479 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.382027 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.383116 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.383810 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.384715 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.385320 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.386286 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.387000 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.387842 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.388400 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.389286 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.390145 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.391016 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.391495 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.393345 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.393910 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.394929 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.395421 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.397269 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.440941 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.467847 4814 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:47510->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.468031 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:47510->192.168.126.11:17697: read: connection reset by peer"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.487945 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"43eb3b97db036f089b4696c779952bcc3a7ac1e4804fe6ef75a765f4016bd726"}
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.489315 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-5gzfx" event={"ID":"2887a737-4338-4fc7-a621-c4d9e74c05ca","Type":"ContainerStarted","Data":"81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3"}
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.489337 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-5gzfx" event={"ID":"2887a737-4338-4fc7-a621-c4d9e74c05ca","Type":"ContainerStarted","Data":"7182d56345c2f6023fd597df3e1f46a6898b6f7d157462a373e174c16c5e6078"}
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.491198 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d"}
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.491221 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7"}
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.491231 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"88efba6081afb4e931a491fa574d22953255c0fb5a09fd04441f5cf57dfd02ac"}
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.492550 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4"}
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.492573 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"577512a80525e6af74c4363d91504d74697177450bbcc37044a850be801af2c2"}
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.499879 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-vnl4q" event={"ID":"1333dbf9-2055-429e-89b1-463b28cff79c","Type":"ContainerStarted","Data":"f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c"}
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.500001 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-vnl4q" event={"ID":"1333dbf9-2055-429e-89b1-463b28cff79c","Type":"ContainerStarted","Data":"57592a747ff2cdae011c189453142a56a9f582f8507b176a81ce85a00d936d3a"}
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.500914 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.502247 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.504132 4814 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832" exitCode=255
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.504172 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832"}
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.511185 4814 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-22 05:13:53 +0000 UTC, rotation deadline is 2026-12-01 02:31:33.697030604 +0000 UTC
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.511291 4814 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7509h12m39.185743911s for next certificate rotation
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.530282 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z"
Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.581261 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.594318 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.597854 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.599025 4814 scope.go:117] "RemoveContainer" containerID="5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.639730 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.670441 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.673373 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-f57bg"] Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.673843 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.675484 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-gpk6m"] Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.676009 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-rq55l"] Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.676138 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.676230 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.677386 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.677412 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.677539 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.677663 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.678014 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.678243 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.679408 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.680573 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.682305 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.682707 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.682851 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.682953 4814 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-multus"/"multus-daemon-config" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.708318 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.731619 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.757149 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\
":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-
22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771564 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-run-multus-certs\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771596 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tsv9\" (UniqueName: \"kubernetes.io/projected/362cbfbe-caa3-40b7-906c-80c378b01e0c-kube-api-access-4tsv9\") pod \"machine-config-daemon-f57bg\" (UID: \"362cbfbe-caa3-40b7-906c-80c378b01e0c\") " pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771651 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/43672f8e-58cc-4665-840f-6477e084f0dd-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771669 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-system-cni-dir\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771684 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-cnibin\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771699 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55chl\" (UniqueName: \"kubernetes.io/projected/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-kube-api-access-55chl\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771715 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-cni-binary-copy\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771742 
4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/43672f8e-58cc-4665-840f-6477e084f0dd-cni-binary-copy\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771757 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-var-lib-cni-bin\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771770 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-hostroot\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771798 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-multus-cni-dir\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771868 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-run-netns\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771932 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-multus-socket-dir-parent\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771952 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-var-lib-cni-multus\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771968 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/362cbfbe-caa3-40b7-906c-80c378b01e0c-mcd-auth-proxy-config\") pod \"machine-config-daemon-f57bg\" (UID: \"362cbfbe-caa3-40b7-906c-80c378b01e0c\") " pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.771984 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-etc-kubernetes\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc 
kubenswrapper[4814]: I0122 05:18:54.772004 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/43672f8e-58cc-4665-840f-6477e084f0dd-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.772017 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-os-release\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.772034 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-multus-daemon-config\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.772048 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/362cbfbe-caa3-40b7-906c-80c378b01e0c-proxy-tls\") pod \"machine-config-daemon-f57bg\" (UID: \"362cbfbe-caa3-40b7-906c-80c378b01e0c\") " pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.772062 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/43672f8e-58cc-4665-840f-6477e084f0dd-system-cni-dir\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.772078 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/43672f8e-58cc-4665-840f-6477e084f0dd-cnibin\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.772095 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/43672f8e-58cc-4665-840f-6477e084f0dd-os-release\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.772110 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-run-k8s-cni-cncf-io\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.772144 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c57bv\" (UniqueName: \"kubernetes.io/projected/43672f8e-58cc-4665-840f-6477e084f0dd-kube-api-access-c57bv\") 
pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.772162 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-var-lib-kubelet\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.772176 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-multus-conf-dir\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.772192 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/362cbfbe-caa3-40b7-906c-80c378b01e0c-rootfs\") pod \"machine-config-daemon-f57bg\" (UID: \"362cbfbe-caa3-40b7-906c-80c378b01e0c\") " pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.787457 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.802421 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.819368 4814 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.836527 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.845808 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.870227 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.872509 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.872725 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:18:56.872695735 +0000 UTC m=+22.956183950 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.872843 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/43672f8e-58cc-4665-840f-6477e084f0dd-cni-binary-copy\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.872942 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-var-lib-cni-bin\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873037 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-hostroot\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873194 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-multus-cni-dir\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873465 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-run-netns\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873589 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-multus-socket-dir-parent\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873428 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-multus-cni-dir\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873100 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-hostroot\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873553 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/43672f8e-58cc-4665-840f-6477e084f0dd-cni-binary-copy\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873561 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-run-netns\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873068 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-var-lib-cni-bin\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873698 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-var-lib-cni-multus\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873803 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-multus-socket-dir-parent\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873818 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/362cbfbe-caa3-40b7-906c-80c378b01e0c-mcd-auth-proxy-config\") pod \"machine-config-daemon-f57bg\" (UID: \"362cbfbe-caa3-40b7-906c-80c378b01e0c\") " pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873935 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-etc-kubernetes\") pod \"multus-rq55l\" (UID: 
\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873961 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/43672f8e-58cc-4665-840f-6477e084f0dd-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.873985 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-os-release\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874005 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-multus-daemon-config\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874031 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/362cbfbe-caa3-40b7-906c-80c378b01e0c-proxy-tls\") pod \"machine-config-daemon-f57bg\" (UID: \"362cbfbe-caa3-40b7-906c-80c378b01e0c\") " pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874051 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/43672f8e-58cc-4665-840f-6477e084f0dd-system-cni-dir\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874072 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/43672f8e-58cc-4665-840f-6477e084f0dd-cnibin\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874089 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/43672f8e-58cc-4665-840f-6477e084f0dd-os-release\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874106 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-run-k8s-cni-cncf-io\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874146 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c57bv\" (UniqueName: \"kubernetes.io/projected/43672f8e-58cc-4665-840f-6477e084f0dd-kube-api-access-c57bv\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: 
\"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874168 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-var-lib-kubelet\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874188 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/362cbfbe-caa3-40b7-906c-80c378b01e0c-rootfs\") pod \"machine-config-daemon-f57bg\" (UID: \"362cbfbe-caa3-40b7-906c-80c378b01e0c\") " pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874203 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-multus-conf-dir\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874225 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-run-multus-certs\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874247 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tsv9\" (UniqueName: \"kubernetes.io/projected/362cbfbe-caa3-40b7-906c-80c378b01e0c-kube-api-access-4tsv9\") pod \"machine-config-daemon-f57bg\" (UID: \"362cbfbe-caa3-40b7-906c-80c378b01e0c\") " pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874280 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/43672f8e-58cc-4665-840f-6477e084f0dd-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874295 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/362cbfbe-caa3-40b7-906c-80c378b01e0c-mcd-auth-proxy-config\") pod \"machine-config-daemon-f57bg\" (UID: \"362cbfbe-caa3-40b7-906c-80c378b01e0c\") " pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874307 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-system-cni-dir\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874317 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/43672f8e-58cc-4665-840f-6477e084f0dd-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: 
\"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874341 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-cnibin\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874361 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-system-cni-dir\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874369 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-etc-kubernetes\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874364 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55chl\" (UniqueName: \"kubernetes.io/projected/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-kube-api-access-55chl\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874401 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-cni-binary-copy\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874410 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-cnibin\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874611 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-run-multus-certs\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874827 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-multus-conf-dir\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874849 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-var-lib-kubelet\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874867 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: 
\"kubernetes.io/host-path/362cbfbe-caa3-40b7-906c-80c378b01e0c-rootfs\") pod \"machine-config-daemon-f57bg\" (UID: \"362cbfbe-caa3-40b7-906c-80c378b01e0c\") " pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874898 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/43672f8e-58cc-4665-840f-6477e084f0dd-system-cni-dir\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.874920 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-os-release\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.875012 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-multus-daemon-config\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.875045 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-run-k8s-cni-cncf-io\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.875246 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/43672f8e-58cc-4665-840f-6477e084f0dd-cnibin\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.875389 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/43672f8e-58cc-4665-840f-6477e084f0dd-os-release\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.875448 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/43672f8e-58cc-4665-840f-6477e084f0dd-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.876020 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-cni-binary-copy\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.876136 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-host-var-lib-cni-multus\") pod 
\"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.880645 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/362cbfbe-caa3-40b7-906c-80c378b01e0c-proxy-tls\") pod \"machine-config-daemon-f57bg\" (UID: \"362cbfbe-caa3-40b7-906c-80c378b01e0c\") " pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.888669 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.897215 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tsv9\" (UniqueName: \"kubernetes.io/projected/362cbfbe-caa3-40b7-906c-80c378b01e0c-kube-api-access-4tsv9\") pod \"machine-config-daemon-f57bg\" (UID: \"362cbfbe-caa3-40b7-906c-80c378b01e0c\") " pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.898958 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c57bv\" (UniqueName: \"kubernetes.io/projected/43672f8e-58cc-4665-840f-6477e084f0dd-kube-api-access-c57bv\") pod \"multus-additional-cni-plugins-gpk6m\" (UID: \"43672f8e-58cc-4665-840f-6477e084f0dd\") " pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.907029 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55chl\" (UniqueName: \"kubernetes.io/projected/22017d22-7b4d-4e3d-bbae-ff564c64bd7b-kube-api-access-55chl\") pod \"multus-rq55l\" (UID: \"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\") " pod="openshift-multus/multus-rq55l" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.911305 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.940160 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.956730 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.970948 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.975293 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.975461 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.975574 4814 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.975592 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.975652 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.975669 4814 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: 
[object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.975676 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:18:56.975653234 +0000 UTC m=+23.059141449 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.975750 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:18:56.975720486 +0000 UTC m=+23.059208701 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.975945 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.976161 4814 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:18:54 crc kubenswrapper[4814]: E0122 05:18:54.976357 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:18:56.976334654 +0000 UTC m=+23.059822879 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.989562 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:54 crc kubenswrapper[4814]: I0122 05:18:54.992555 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:18:55 crc kubenswrapper[4814]: W0122 05:18:55.002832 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod362cbfbe_caa3_40b7_906c_80c378b01e0c.slice/crio-4b8d2f3b34689904a9c6e8c66a4a28534154f9520930b6f1de9c01f18432edaf WatchSource:0}: Error finding container 4b8d2f3b34689904a9c6e8c66a4a28534154f9520930b6f1de9c01f18432edaf: Status 404 returned error can't find the container with id 4b8d2f3b34689904a9c6e8c66a4a28534154f9520930b6f1de9c01f18432edaf Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.003896 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-rq55l" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.012514 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.013603 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.014504 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.019923 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.024482 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22
T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"
}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: W0122 05:18:55.051124 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43672f8e_58cc_4665_840f_6477e084f0dd.slice/crio-10b0a9d59750eed88de05fdb741c45e38ba7ed1889c085273f4c29aba514b477 WatchSource:0}: Error finding container 10b0a9d59750eed88de05fdb741c45e38ba7ed1889c085273f4c29aba514b477: Status 404 returned error can't find the container with id 10b0a9d59750eed88de05fdb741c45e38ba7ed1889c085273f4c29aba514b477 Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.054773 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.056478 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.068206 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.076941 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:18:55 crc 
kubenswrapper[4814]: E0122 05:18:55.077133 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:18:55 crc kubenswrapper[4814]: E0122 05:18:55.077167 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:18:55 crc kubenswrapper[4814]: E0122 05:18:55.077183 4814 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:55 crc kubenswrapper[4814]: E0122 05:18:55.077244 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:18:57.077226101 +0000 UTC m=+23.160714316 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.078250 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wvzgj"] Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.079330 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.082246 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.086149 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.086262 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.086403 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.086461 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.086571 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.086579 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.093705 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.098749 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.103073 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.108835 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.123206 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.137206 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\
\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\
",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.149960 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.155025 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22
T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"
}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.171175 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}
}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.171653 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.178143 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-kubelet\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.178215 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-ovn\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.178248 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.178273 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-log-socket\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.178350 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-node-log\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.178373 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-run-ovn-kubernetes\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.178460 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-ovnkube-script-lib\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.178567 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5scm\" (UniqueName: \"kubernetes.io/projected/55649399-9fd6-4e9a-b249-ce01b498c626-kube-api-access-q5scm\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.178691 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-cni-bin\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.178740 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/55649399-9fd6-4e9a-b249-ce01b498c626-ovn-node-metrics-cert\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.178972 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-systemd\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.178994 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-cni-netd\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.179018 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-env-overrides\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.179040 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-openvswitch\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.179063 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-systemd-units\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.179106 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-slash\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 
crc kubenswrapper[4814]: I0122 05:18:55.179135 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-var-lib-openvswitch\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.179170 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-etc-openvswitch\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.179191 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-run-netns\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.179212 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-ovnkube-config\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.185613 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.198428 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.212102 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.212292 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.227103 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers 
with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327
fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.249685 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.270739 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.278743 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 17:24:30.314900558 +0000 UTC Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.279901 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-etc-openvswitch\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.279960 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-run-netns\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.279978 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-ovnkube-config\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280000 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-ovn\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280020 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-kubelet\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280012 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-etc-openvswitch\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280039 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280095 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280102 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-log-socket\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280135 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-ovn\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280152 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-node-log\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280159 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-kubelet\") 
pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280173 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-run-ovn-kubernetes\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280183 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-run-netns\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280192 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-ovnkube-script-lib\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280208 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-node-log\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280219 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5scm\" (UniqueName: \"kubernetes.io/projected/55649399-9fd6-4e9a-b249-ce01b498c626-kube-api-access-q5scm\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280231 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-log-socket\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280258 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-cni-bin\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280260 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-run-ovn-kubernetes\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280279 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/55649399-9fd6-4e9a-b249-ce01b498c626-ovn-node-metrics-cert\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280299 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-systemd\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280314 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-cni-netd\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280335 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-env-overrides\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280365 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-openvswitch\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280384 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-systemd-units\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280407 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-slash\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280424 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-var-lib-openvswitch\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280475 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-var-lib-openvswitch\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.280498 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-cni-bin\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.281149 
4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-ovnkube-script-lib\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.281166 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-ovnkube-config\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.281212 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-systemd-units\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.281237 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-openvswitch\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.281192 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-systemd\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.281275 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-slash\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.281276 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-cni-netd\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.281787 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-env-overrides\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.285563 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/55649399-9fd6-4e9a-b249-ce01b498c626-ovn-node-metrics-cert\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.298644 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.314989 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5scm\" (UniqueName: \"kubernetes.io/projected/55649399-9fd6-4e9a-b249-ce01b498c626-kube-api-access-q5scm\") pod \"ovnkube-node-wvzgj\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") " pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.325759 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.338087 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.343428 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.343463 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:18:55 crc kubenswrapper[4814]: E0122 05:18:55.343575 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:18:55 crc kubenswrapper[4814]: E0122 05:18:55.343685 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.347022 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.367680 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.372074 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.388650 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\
":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.395206 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.399620 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:18:55 crc kubenswrapper[4814]: W0122 05:18:55.463829 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55649399_9fd6_4e9a_b249_ce01b498c626.slice/crio-4f190f7eddd52e6ead3de482c65ce4708567155b7b8ceeaa98c96c33646a2ef4 WatchSource:0}: Error finding container 4f190f7eddd52e6ead3de482c65ce4708567155b7b8ceeaa98c96c33646a2ef4: Status 404 returned error can't find the container with id 4f190f7eddd52e6ead3de482c65ce4708567155b7b8ceeaa98c96c33646a2ef4 Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.507767 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rq55l" event={"ID":"22017d22-7b4d-4e3d-bbae-ff564c64bd7b","Type":"ContainerStarted","Data":"1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc"} Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.507832 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rq55l" event={"ID":"22017d22-7b4d-4e3d-bbae-ff564c64bd7b","Type":"ContainerStarted","Data":"952981bf6fd923bf237b09e2d9250d9f380d806b6c880c7a9be02b2b2c390f0a"} Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.510296 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.511705 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b"} Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.512382 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.513494 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" event={"ID":"43672f8e-58cc-4665-840f-6477e084f0dd","Type":"ContainerStarted","Data":"10b0a9d59750eed88de05fdb741c45e38ba7ed1889c085273f4c29aba514b477"} Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.519485 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711"} Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.519546 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"4b8d2f3b34689904a9c6e8c66a4a28534154f9520930b6f1de9c01f18432edaf"} Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.527315 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerStarted","Data":"4f190f7eddd52e6ead3de482c65ce4708567155b7b8ceeaa98c96c33646a2ef4"} Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.532259 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.542545 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.544311 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.550109 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Dis
abled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.562497 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.595169 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.617755 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-
cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.645411 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.669733 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.696356 4814 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.720308 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.727123 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.771601 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.812278 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22
T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"
}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.858591 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}
}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.880554 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.941702 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.956456 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:55 crc kubenswrapper[4814]: I0122 05:18:55.984942 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 
05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"
reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.001289 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:55Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.028843 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.053741 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.091714 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.112514 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.145707 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.159361 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.171663 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.210328 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.256758 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.279172 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 18:43:51.462507886 +0000 UTC Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.343429 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:18:56 crc kubenswrapper[4814]: E0122 05:18:56.343555 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.532137 4814 generic.go:334] "Generic (PLEG): container finished" podID="43672f8e-58cc-4665-840f-6477e084f0dd" containerID="777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50" exitCode=0 Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.532198 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" event={"ID":"43672f8e-58cc-4665-840f-6477e084f0dd","Type":"ContainerDied","Data":"777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50"} Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.533934 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d"} Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.535239 4814 generic.go:334] "Generic (PLEG): container finished" podID="55649399-9fd6-4e9a-b249-ce01b498c626" containerID="ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d" exitCode=0 Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.535317 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d"} Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.536963 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943"} Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.546017 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.565213 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.589047 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.623421 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.654276 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.714203 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.731581 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.801443 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.837453 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.901996 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:18:56 crc kubenswrapper[4814]: E0122 05:18:56.902506 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:19:00.90248155 +0000 UTC m=+26.985969755 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.912288 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.931986 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 
05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"
reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.951647 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz
5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.966321 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:56 crc kubenswrapper[4814]: I0122 05:18:56.999166 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:56Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.003179 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.003214 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 
05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.003250 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.003343 4814 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.003382 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:01.003370676 +0000 UTC m=+27.086858891 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.003483 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.003502 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.003513 4814 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.003536 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:01.003529841 +0000 UTC m=+27.087018056 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.003563 4814 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.003581 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:01.003576023 +0000 UTC m=+27.087064238 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.015970 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.031989 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.047424 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.070166 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.085295 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.103732 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.104171 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.104206 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.104221 4814 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.104282 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:01.104263253 +0000 UTC m=+27.187751488 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.104342 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z 
is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.119880 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.137902 4814 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.139799 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.139850 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.139860 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.139971 4814 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.139974 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var
-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.147145 4814 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.147369 4814 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.148615 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.148654 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.148664 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.148679 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.148688 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:57Z","lastTransitionTime":"2026-01-22T05:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.160490 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.174858 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.180988 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.182141 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.182168 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.182176 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:57 crc kubenswrapper[4814]: 
I0122 05:18:57.182191 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.182200 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:57Z","lastTransitionTime":"2026-01-22T05:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.196818 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.198156 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.198200 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.198208 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.198224 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.198235 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:57Z","lastTransitionTime":"2026-01-22T05:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.211709 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.213689 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"a
aa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.217838 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.217869 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.217878 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.217895 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.217905 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:57Z","lastTransitionTime":"2026-01-22T05:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.230458 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.235027 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.235119 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.235194 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.235269 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.235338 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:57Z","lastTransitionTime":"2026-01-22T05:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.247142 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.247270 4814 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.248966 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.248996 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.249005 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.249020 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.249029 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:57Z","lastTransitionTime":"2026-01-22T05:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.279739 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 17:36:59.733253035 +0000 UTC Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.343366 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.343404 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.343749 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:18:57 crc kubenswrapper[4814]: E0122 05:18:57.344598 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.350909 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.350941 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.350951 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.350967 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.350979 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:57Z","lastTransitionTime":"2026-01-22T05:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.453615 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.453675 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.453687 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.453704 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.453717 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:57Z","lastTransitionTime":"2026-01-22T05:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.544232 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerStarted","Data":"b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696"}
Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.544546 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerStarted","Data":"08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a"}
Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.544723 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerStarted","Data":"341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5"}
Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.544890 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerStarted","Data":"c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3"}
Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.545043 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerStarted","Data":"8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c"}
Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.545163 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerStarted","Data":"8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9"}
Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.546278 4814 generic.go:334] "Generic (PLEG): container finished" podID="43672f8e-58cc-4665-840f-6477e084f0dd" containerID="e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe" exitCode=0
Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.546340 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" event={"ID":"43672f8e-58cc-4665-840f-6477e084f0dd","Type":"ContainerDied","Data":"e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe"}
Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.555612 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.555712 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.555726 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.555745 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.555757 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:57Z","lastTransitionTime":"2026-01-22T05:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready:
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.561293 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f
7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.580367 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.601660 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.618542 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.632470 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.647252 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.658855 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.658926 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.658941 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.658963 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.658975 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:57Z","lastTransitionTime":"2026-01-22T05:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.674308 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd6
32f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.692113 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.711598 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.726246 4814 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.738479 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.751674 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.760671 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.760706 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.760714 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.760727 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.760738 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:57Z","lastTransitionTime":"2026-01-22T05:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.763922 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:57Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.864057 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.864110 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.864123 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.864141 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.864153 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:57Z","lastTransitionTime":"2026-01-22T05:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.968005 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.968287 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.968367 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.968449 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:57 crc kubenswrapper[4814]: I0122 05:18:57.968540 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:57Z","lastTransitionTime":"2026-01-22T05:18:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.071495 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.072023 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.072082 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.072140 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.072236 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:58Z","lastTransitionTime":"2026-01-22T05:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.175132 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.175360 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.175520 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.175617 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.175731 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:58Z","lastTransitionTime":"2026-01-22T05:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.278932 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.279160 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.279268 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.279376 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.279456 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:58Z","lastTransitionTime":"2026-01-22T05:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.279902 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 04:43:44.313018997 +0000 UTC Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.343559 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:18:58 crc kubenswrapper[4814]: E0122 05:18:58.343770 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.382932 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.383184 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.383268 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.383357 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.383438 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:58Z","lastTransitionTime":"2026-01-22T05:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.487021 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.487065 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.487076 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.487094 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.487107 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:58Z","lastTransitionTime":"2026-01-22T05:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.553574 4814 generic.go:334] "Generic (PLEG): container finished" podID="43672f8e-58cc-4665-840f-6477e084f0dd" containerID="a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b" exitCode=0 Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.553687 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" event={"ID":"43672f8e-58cc-4665-840f-6477e084f0dd","Type":"ContainerDied","Data":"a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b"} Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.577871 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:58Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.589874 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.589914 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.589928 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.590117 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.590130 4814 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:58Z","lastTransitionTime":"2026-01-22T05:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.601890 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:58Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.616016 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:58Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.630593 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:58Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.646427 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:58Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.666926 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:58Z 
is after 2025-08-24T17:21:41Z" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.683041 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:58Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.695122 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.695212 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.695234 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.695267 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.695289 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:58Z","lastTransitionTime":"2026-01-22T05:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.698733 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:58Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.714337 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:58Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.729685 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:58Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.742749 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:58Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.761679 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:58Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.791066 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-l
ib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:58Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.799210 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.799281 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.799304 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.799340 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.799363 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:58Z","lastTransitionTime":"2026-01-22T05:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.902322 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.902370 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.902379 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.902397 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:58 crc kubenswrapper[4814]: I0122 05:18:58.902406 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:58Z","lastTransitionTime":"2026-01-22T05:18:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.005113 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.005166 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.005177 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.005197 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.005209 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:59Z","lastTransitionTime":"2026-01-22T05:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.107647 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.107687 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.107697 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.107714 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.107723 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:59Z","lastTransitionTime":"2026-01-22T05:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.183822 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.187466 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.192290 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.200817 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.210121 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.210156 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.210165 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.210179 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.210187 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:59Z","lastTransitionTime":"2026-01-22T05:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.222013 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd6
32f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.233665 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.247297 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.264157 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.280474 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 00:58:46.204108966 +0000 UTC Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.280538 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.299594 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.312118 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.312185 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.312199 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.312224 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.312238 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:59Z","lastTransitionTime":"2026-01-22T05:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.315758 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.331370 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.342673 4814 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.342790 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:18:59 crc kubenswrapper[4814]: E0122 05:18:59.342832 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:18:59 crc kubenswrapper[4814]: E0122 05:18:59.342954 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.346449 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.c
ni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.363433 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.376222 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.393468 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 
05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"
reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.406663 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.414874 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.414906 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.414915 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 
05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.414930 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.414940 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:59Z","lastTransitionTime":"2026-01-22T05:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.418965 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.437279 4814 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f2
9ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\
":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.457467 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node 
kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.476018 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\
\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.489958 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.506516 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.516601 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.516666 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.516677 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.516692 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.516701 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:59Z","lastTransitionTime":"2026-01-22T05:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.527815 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.541003 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.554198 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.558889 4814 generic.go:334] "Generic (PLEG): container finished" podID="43672f8e-58cc-4665-840f-6477e084f0dd" containerID="ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed" exitCode=0 Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.559000 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" event={"ID":"43672f8e-58cc-4665-840f-6477e084f0dd","Type":"ContainerDied","Data":"ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed"} Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.563487 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerStarted","Data":"78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200"} Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.577028 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.594006 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.610005 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.619010 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.619059 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.619071 4814 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.619088 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.619104 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:59Z","lastTransitionTime":"2026-01-22T05:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.626755 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.640094 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.648798 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.660316 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 
05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"
reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.673150 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.689226 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z 
is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.699108 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.708414 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.719978 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.721357 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.721451 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.721462 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.721481 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.721492 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:59Z","lastTransitionTime":"2026-01-22T05:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.731842 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.744736 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.769588 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.810672 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.824036 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.824109 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.824130 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.824157 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.824175 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:59Z","lastTransitionTime":"2026-01-22T05:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.850265 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.893232 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:18:59Z is after 2025-08-24T17:21:41Z" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.927337 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.927389 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.927399 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.927418 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:18:59 crc kubenswrapper[4814]: I0122 05:18:59.927433 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:18:59Z","lastTransitionTime":"2026-01-22T05:18:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.030237 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.030300 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.030314 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.030336 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.030352 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:00Z","lastTransitionTime":"2026-01-22T05:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.133756 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.133827 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.133846 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.133883 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.133917 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:00Z","lastTransitionTime":"2026-01-22T05:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.237825 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.237899 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.237919 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.237952 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.237974 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:00Z","lastTransitionTime":"2026-01-22T05:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.280649 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 20:43:07.905595681 +0000 UTC Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.341360 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.341411 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.341424 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.341443 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.341454 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:00Z","lastTransitionTime":"2026-01-22T05:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.343242 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:00 crc kubenswrapper[4814]: E0122 05:19:00.343424 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.444699 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.444750 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.444762 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.444779 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.444787 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:00Z","lastTransitionTime":"2026-01-22T05:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.549062 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.549105 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.549117 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.549138 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.549183 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:00Z","lastTransitionTime":"2026-01-22T05:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.572582 4814 generic.go:334] "Generic (PLEG): container finished" podID="43672f8e-58cc-4665-840f-6477e084f0dd" containerID="1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277" exitCode=0 Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.572655 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" event={"ID":"43672f8e-58cc-4665-840f-6477e084f0dd","Type":"ContainerDied","Data":"1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277"} Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.590449 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.632713 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\
\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.654978 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.655020 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.655030 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.655047 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.655071 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:00Z","lastTransitionTime":"2026-01-22T05:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.670572 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd6
32f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.688532 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.719297 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.741958 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.755048 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.758021 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.758060 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.758071 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.758088 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.758098 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:00Z","lastTransitionTime":"2026-01-22T05:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.768541 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.785842 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.799119 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.822044 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-l
ib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.839847 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 
05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"
reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.857847 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz
5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.860957 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.860994 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.861004 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.861026 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.861037 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:00Z","lastTransitionTime":"2026-01-22T05:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.871136 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:00Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.956711 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:19:00 crc kubenswrapper[4814]: E0122 05:19:00.956986 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:19:08.956947436 +0000 UTC m=+35.040435641 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.965310 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.965365 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.965378 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.965403 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:00 crc kubenswrapper[4814]: I0122 05:19:00.965420 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:00Z","lastTransitionTime":"2026-01-22T05:19:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.058304 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.058366 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.058425 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.058536 4814 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.058597 4814 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.058735 4814 projected.go:288] Couldn't get configMap 
openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.058750 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.058762 4814 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.058806 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:09.058586745 +0000 UTC m=+35.142074960 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.058824 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:09.058817872 +0000 UTC m=+35.142306087 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.058835 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:09.058830692 +0000 UTC m=+35.142318907 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.067858 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.067948 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.067975 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.068007 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.068044 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:01Z","lastTransitionTime":"2026-01-22T05:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.160077 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.160418 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.160489 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.160518 4814 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.160690 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:09.160603185 +0000 UTC m=+35.244091430 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.170758 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.170852 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.170872 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.170942 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.170963 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:01Z","lastTransitionTime":"2026-01-22T05:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.274797 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.274932 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.274957 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.274983 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.275031 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:01Z","lastTransitionTime":"2026-01-22T05:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.281029 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 19:56:17.220279403 +0000 UTC Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.343594 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.343684 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.343837 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:01 crc kubenswrapper[4814]: E0122 05:19:01.344018 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.378197 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.378267 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.378286 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.378312 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.378331 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:01Z","lastTransitionTime":"2026-01-22T05:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.526851 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.526912 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.526930 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.526957 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.526976 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:01Z","lastTransitionTime":"2026-01-22T05:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.580657 4814 generic.go:334] "Generic (PLEG): container finished" podID="43672f8e-58cc-4665-840f-6477e084f0dd" containerID="13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14" exitCode=0 Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.580712 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" event={"ID":"43672f8e-58cc-4665-840f-6477e084f0dd","Type":"ContainerDied","Data":"13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14"} Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.605420 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.630615 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.630683 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.630703 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.630733 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.630759 4814 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:01Z","lastTransitionTime":"2026-01-22T05:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.633266 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.649889 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.671037 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.703153 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d5
1905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z"
Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.733738 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.733801 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.733815 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.733836 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.733853 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:01Z","lastTransitionTime":"2026-01-22T05:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.740505 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd6
32f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.755992 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.772979 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.791368 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.806980 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.823619 4814 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.835874 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z"
Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.838402 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.838459 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.838476 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.838493 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.838503 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:01Z","lastTransitionTime":"2026-01-22T05:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.849096 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.866161 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:01Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.940859 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.941027 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.941147 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.941275 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:01 crc kubenswrapper[4814]: I0122 05:19:01.941405 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:01Z","lastTransitionTime":"2026-01-22T05:19:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.045132 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.045175 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.045185 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.045202 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.045214 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:02Z","lastTransitionTime":"2026-01-22T05:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.148146 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.148182 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.148191 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.148206 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.148219 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:02Z","lastTransitionTime":"2026-01-22T05:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.251017 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.251065 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.251075 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.251094 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.251104 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:02Z","lastTransitionTime":"2026-01-22T05:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.281931 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 16:06:02.004013207 +0000 UTC Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.344913 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:02 crc kubenswrapper[4814]: E0122 05:19:02.345092 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.354281 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.354356 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.354372 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.354396 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.354411 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:02Z","lastTransitionTime":"2026-01-22T05:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.458593 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.458696 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.458712 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.458762 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.458779 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:02Z","lastTransitionTime":"2026-01-22T05:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.561999 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.562075 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.562097 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.562128 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.562146 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:02Z","lastTransitionTime":"2026-01-22T05:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.605375 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" event={"ID":"43672f8e-58cc-4665-840f-6477e084f0dd","Type":"ContainerStarted","Data":"f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b"} Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.613184 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerStarted","Data":"597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6"} Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.613745 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.613854 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.638706 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 
05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"
reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.694132 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.694188 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.694200 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.694222 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.694234 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:02Z","lastTransitionTime":"2026-01-22T05:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.710004 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.710423 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:19:02 crc 
kubenswrapper[4814]: I0122 05:19:02.711986 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.728869 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.741858 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.758247 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.780218 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.797108 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.797192 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:02 crc 
kubenswrapper[4814]: I0122 05:19:02.797211 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.797247 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.797272 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:02Z","lastTransitionTime":"2026-01-22T05:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.806887 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z 
is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.824116 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.840516 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@s
ha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.856516 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.873444 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.887888 4814 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.901522 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.901597 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.901741 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.901852 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.901913 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:02Z","lastTransitionTime":"2026-01-22T05:19:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.911860 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.931316 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.949043 4814 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.969540 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:02 crc kubenswrapper[4814]: I0122 05:19:02.986613 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.003014 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:02Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.004443 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.004489 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.004507 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.004537 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.004554 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:03Z","lastTransitionTime":"2026-01-22T05:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.021517 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:03Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.042405 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:03Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.062107 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:03Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.078028 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:03Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.100445 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:03Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.107271 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.107339 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.107357 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.107386 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.107415 4814 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:03Z","lastTransitionTime":"2026-01-22T05:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.119944 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:03Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.142166 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174
f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvsw
itch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:03Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.156661 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:03Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.169985 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:03Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.195934 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:03Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.210341 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.210383 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:03 crc 
kubenswrapper[4814]: I0122 05:19:03.210396 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.210414 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.210430 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:03Z","lastTransitionTime":"2026-01-22T05:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.282328 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 16:54:36.267708616 +0000 UTC Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.313692 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.313767 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.313790 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.313849 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.313871 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:03Z","lastTransitionTime":"2026-01-22T05:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.343210 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.343264 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:03 crc kubenswrapper[4814]: E0122 05:19:03.343416 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:03 crc kubenswrapper[4814]: E0122 05:19:03.343573 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.417297 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.417367 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.417384 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.417411 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.417437 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:03Z","lastTransitionTime":"2026-01-22T05:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.520525 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.520605 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.520653 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.520683 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.520703 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:03Z","lastTransitionTime":"2026-01-22T05:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.616509 4814 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.623291 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.623347 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.623362 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.623386 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.623418 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:03Z","lastTransitionTime":"2026-01-22T05:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.726207 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.726271 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.726290 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.726316 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.726333 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:03Z","lastTransitionTime":"2026-01-22T05:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.829311 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.829347 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.829357 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.829372 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.829383 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:03Z","lastTransitionTime":"2026-01-22T05:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.932068 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.932116 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.932129 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.932148 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:03 crc kubenswrapper[4814]: I0122 05:19:03.932160 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:03Z","lastTransitionTime":"2026-01-22T05:19:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.034606 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.034688 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.034701 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.034726 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.034739 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:04Z","lastTransitionTime":"2026-01-22T05:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.136846 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.136916 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.136930 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.136947 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.136959 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:04Z","lastTransitionTime":"2026-01-22T05:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.239572 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.239642 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.239654 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.239670 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.239680 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:04Z","lastTransitionTime":"2026-01-22T05:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.283409 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 19:07:25.242796267 +0000 UTC Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.342229 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.342260 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.342269 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.342282 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.342294 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:04Z","lastTransitionTime":"2026-01-22T05:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.342873 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:04 crc kubenswrapper[4814]: E0122 05:19:04.342975 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.357676 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.369492 4814 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.382907 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"message\\\":\\\"containers with unready 
status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 
05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"
reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.405804 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T
05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f
2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]
}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.432739 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://597c948d3de2a751878f21b822ca6ddb2c014515
66cb91484ab23e6b608126b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.444453 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.444485 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.444495 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.444512 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.444525 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:04Z","lastTransitionTime":"2026-01-22T05:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.447937 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.460706 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.486240 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.503768 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.534235 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.546287 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.546315 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.546327 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.546342 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.546353 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:04Z","lastTransitionTime":"2026-01-22T05:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.549225 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.570685 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.583579 4814 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.594294 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:04Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.618802 4814 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.648856 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.648927 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.648947 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.648972 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.648992 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:04Z","lastTransitionTime":"2026-01-22T05:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.750846 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.750903 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.750919 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.750941 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.750957 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:04Z","lastTransitionTime":"2026-01-22T05:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.853886 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.853980 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.854062 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.854089 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.854174 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:04Z","lastTransitionTime":"2026-01-22T05:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.883517 4814 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.958260 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.958326 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.958343 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.958368 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:04 crc kubenswrapper[4814]: I0122 05:19:04.958386 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:04Z","lastTransitionTime":"2026-01-22T05:19:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.061855 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.061927 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.061944 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.061968 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.061985 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:05Z","lastTransitionTime":"2026-01-22T05:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.164562 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.164617 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.164662 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.164688 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.164706 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:05Z","lastTransitionTime":"2026-01-22T05:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.267583 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.267701 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.267726 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.267755 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.267779 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:05Z","lastTransitionTime":"2026-01-22T05:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.282793 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.283876 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 11:52:30.288497345 +0000 UTC Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.302852 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.320480 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.343770 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.343838 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:05 crc kubenswrapper[4814]: E0122 05:19:05.343983 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:05 crc kubenswrapper[4814]: E0122 05:19:05.344154 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.348451 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f
2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\
\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.371146 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.371191 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.371208 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.371228 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.371247 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:05Z","lastTransitionTime":"2026-01-22T05:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.385822 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://597c948d3de2a751878f21b822ca6ddb2c014515
66cb91484ab23e6b608126b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.412144 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.437731 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.473846 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.473893 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.473909 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.473935 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.473951 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:05Z","lastTransitionTime":"2026-01-22T05:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.484345 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.499823 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.513233 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.528516 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.541347 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.557264 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"res
tartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.571369 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.576148 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.576181 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.576195 4814 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.576216 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.576230 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:05Z","lastTransitionTime":"2026-01-22T05:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.585684 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.623966 4814 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/0.log" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.628199 4814 generic.go:334] "Generic (PLEG): container finished" podID="55649399-9fd6-4e9a-b249-ce01b498c626" containerID="597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6" exitCode=1 Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.628272 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6"} Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.629512 4814 scope.go:117] "RemoveContainer" containerID="597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.654188 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernete
s/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.676611 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.680816 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.680894 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.680919 4814 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.680953 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.680978 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:05Z","lastTransitionTime":"2026-01-22T05:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.694578 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.711073 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.724150 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.744495 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.775110 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:04Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670536 6019 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.670764 6019 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670849 6019 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.672609 6019 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 05:19:04.672667 6019 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 05:19:04.672689 6019 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 05:19:04.672702 6019 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 05:19:04.672725 6019 factory.go:656] Stopping watch factory\\\\nI0122 05:19:04.672754 6019 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 05:19:04.672768 6019 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 05:19:04.672787 6019 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 05:19:04.672799 6019 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.784143 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.784197 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.784211 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.784233 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.784247 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:05Z","lastTransitionTime":"2026-01-22T05:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.792043 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.806554 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.819927 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.839544 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-
dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.854660 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.875334 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.886289 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.886352 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.886372 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.886449 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.886471 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:05Z","lastTransitionTime":"2026-01-22T05:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.893266 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:05Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.989423 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.989485 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.989505 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.989531 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:05 crc kubenswrapper[4814]: I0122 05:19:05.989548 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:05Z","lastTransitionTime":"2026-01-22T05:19:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.092441 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.092525 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.092546 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.092580 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.092599 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:06Z","lastTransitionTime":"2026-01-22T05:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.195114 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.195150 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.195161 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.195177 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.195188 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:06Z","lastTransitionTime":"2026-01-22T05:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.284752 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 23:19:06.00334014 +0000 UTC
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.297651 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.297674 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.297682 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.297694 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.297704 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:06Z","lastTransitionTime":"2026-01-22T05:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.343695 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:19:06 crc kubenswrapper[4814]: E0122 05:19:06.343870 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.399932 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.399985 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.399994 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.400013 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.400024 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:06Z","lastTransitionTime":"2026-01-22T05:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.502062 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.502105 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.502117 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.502132 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.502141 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:06Z","lastTransitionTime":"2026-01-22T05:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.566878 4814 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.604597 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.604684 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.604705 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.604736 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.604752 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:06Z","lastTransitionTime":"2026-01-22T05:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.633558 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/0.log" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.636030 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerStarted","Data":"94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02"} Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.636142 4814 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.647787 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.657391 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.679747 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.711107 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.711188 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:06 crc 
kubenswrapper[4814]: I0122 05:19:06.711209 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.711247 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.711267 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:06Z","lastTransitionTime":"2026-01-22T05:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.732599 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebae
ac266f62f528e0d533059d02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:04Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670536 6019 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.670764 6019 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670849 6019 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.672609 6019 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 05:19:04.672667 6019 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 05:19:04.672689 6019 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 05:19:04.672702 6019 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 05:19:04.672725 6019 factory.go:656] Stopping watch factory\\\\nI0122 05:19:04.672754 6019 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 05:19:04.672768 6019 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 05:19:04.672787 6019 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 05:19:04.672799 6019 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.755213 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.776872 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.787974 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.797404 4814 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.806740 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.813520 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.813543 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.813552 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.813565 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.813573 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:06Z","lastTransitionTime":"2026-01-22T05:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.817643 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.829507 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.843500 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@s
ha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.853151 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.861311 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:06Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.916475 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.916544 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.916562 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.916588 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:06 crc kubenswrapper[4814]: I0122 05:19:06.916608 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:06Z","lastTransitionTime":"2026-01-22T05:19:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.019684 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.019762 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.019786 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.019820 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.019842 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.123312 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.123368 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.123386 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.123411 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.123429 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.226493 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.226573 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.226599 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.226672 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.226697 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.285946 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 10:05:45.150225082 +0000 UTC Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.330064 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.330143 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.330167 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.330198 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.330221 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.343981 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.344028 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:07 crc kubenswrapper[4814]: E0122 05:19:07.344199 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:07 crc kubenswrapper[4814]: E0122 05:19:07.344339 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.360329 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.360381 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.360399 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.360421 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.360439 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: E0122 05:19:07.381407 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeByt
es\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.386041 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.386089 4814 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.386107 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.386133 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.386150 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: E0122 05:19:07.408354 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.413340 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.413394 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.413413 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.413437 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.413455 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: E0122 05:19:07.434654 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.439361 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.439418 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.439443 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.439473 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.439495 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: E0122 05:19:07.459877 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.464777 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.464828 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.464845 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.464869 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.464887 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: E0122 05:19:07.486030 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: E0122 05:19:07.486255 4814 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.488511 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.488566 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.488584 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.488608 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.488650 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.591378 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.591434 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.591452 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.591481 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.591504 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.642233 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/1.log" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.643192 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/0.log" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.647663 4814 generic.go:334] "Generic (PLEG): container finished" podID="55649399-9fd6-4e9a-b249-ce01b498c626" containerID="94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02" exitCode=1 Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.647709 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02"} Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.647800 4814 scope.go:117] "RemoveContainer" containerID="597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.649070 4814 scope.go:117] "RemoveContainer" containerID="94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02" Jan 22 05:19:07 crc kubenswrapper[4814]: E0122 05:19:07.649403 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.679687 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.694329 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.694404 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.694423 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.694448 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.694466 4814 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.703580 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.725707 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.749020 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.764574 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.789424 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.796948 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.797014 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc 
kubenswrapper[4814]: I0122 05:19:07.797033 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.797058 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.797075 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.824694 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebae
ac266f62f528e0d533059d02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:04Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670536 6019 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.670764 6019 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670849 6019 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.672609 6019 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 05:19:04.672667 6019 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 05:19:04.672689 6019 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 05:19:04.672702 6019 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 05:19:04.672725 6019 factory.go:656] Stopping watch factory\\\\nI0122 05:19:04.672754 6019 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 05:19:04.672768 6019 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 05:19:04.672787 6019 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 05:19:04.672799 6019 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"ices.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:19:06.669529 6159 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to 
start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: cert\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hos
tIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.840356 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.861614 4814 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.879365 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.895919 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.900195 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.900249 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.900272 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.900301 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.900319 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:07Z","lastTransitionTime":"2026-01-22T05:19:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.913067 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.933519 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:07 crc kubenswrapper[4814]: I0122 05:19:07.948271 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:07Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.003665 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.003728 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.003746 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.003773 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.003790 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:08Z","lastTransitionTime":"2026-01-22T05:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.106331 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.106369 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.106379 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.106397 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.106409 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:08Z","lastTransitionTime":"2026-01-22T05:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.118315 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr"] Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.119250 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.121756 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.122944 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.138325 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},
{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.159253 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.182308 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.200611 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.208254 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.208296 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.208308 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.208333 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.208346 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:08Z","lastTransitionTime":"2026-01-22T05:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.218762 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.238508 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.256928 4814 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.278524 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.284057 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/78f0ef15-ba39-4f8f-b3df-7fb6671e7a79-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-fxxmr\" (UID: \"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.284112 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrz8p\" (UniqueName: \"kubernetes.io/projected/78f0ef15-ba39-4f8f-b3df-7fb6671e7a79-kube-api-access-lrz8p\") pod \"ovnkube-control-plane-749d76644c-fxxmr\" (UID: \"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.284162 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/78f0ef15-ba39-4f8f-b3df-7fb6671e7a79-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-fxxmr\" (UID: \"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.284221 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/78f0ef15-ba39-4f8f-b3df-7fb6671e7a79-env-overrides\") pod \"ovnkube-control-plane-749d76644c-fxxmr\" (UID: \"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.286934 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 05:23:24.29257407 +0000 UTC Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.304120 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.311024 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.311060 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.311070 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.311087 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.311098 4814 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:08Z","lastTransitionTime":"2026-01-22T05:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.324966 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.340591 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.343031 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:08 crc kubenswrapper[4814]: E0122 05:19:08.343252 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.357413 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.381724 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.385269 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/78f0ef15-ba39-4f8f-b3df-7fb6671e7a79-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-fxxmr\" (UID: \"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.385332 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrz8p\" (UniqueName: \"kubernetes.io/projected/78f0ef15-ba39-4f8f-b3df-7fb6671e7a79-kube-api-access-lrz8p\") pod \"ovnkube-control-plane-749d76644c-fxxmr\" (UID: \"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.385388 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/78f0ef15-ba39-4f8f-b3df-7fb6671e7a79-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-fxxmr\" (UID: \"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.385432 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/78f0ef15-ba39-4f8f-b3df-7fb6671e7a79-env-overrides\") pod \"ovnkube-control-plane-749d76644c-fxxmr\" (UID: \"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.386624 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/78f0ef15-ba39-4f8f-b3df-7fb6671e7a79-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-fxxmr\" (UID: \"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.386778 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/78f0ef15-ba39-4f8f-b3df-7fb6671e7a79-env-overrides\") pod \"ovnkube-control-plane-749d76644c-fxxmr\" (UID: \"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.397372 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/78f0ef15-ba39-4f8f-b3df-7fb6671e7a79-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-fxxmr\" (UID: \"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.414243 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.414317 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.414342 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.414374 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.414399 4814 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:08Z","lastTransitionTime":"2026-01-22T05:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.417143 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrz8p\" (UniqueName: \"kubernetes.io/projected/78f0ef15-ba39-4f8f-b3df-7fb6671e7a79-kube-api-access-lrz8p\") pod \"ovnkube-control-plane-749d76644c-fxxmr\" (UID: \"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.418831 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebae
ac266f62f528e0d533059d02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:04Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670536 6019 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.670764 6019 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670849 6019 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.672609 6019 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 05:19:04.672667 6019 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 05:19:04.672689 6019 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 05:19:04.672702 6019 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 05:19:04.672725 6019 factory.go:656] Stopping watch factory\\\\nI0122 05:19:04.672754 6019 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 05:19:04.672768 6019 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 05:19:04.672787 6019 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 05:19:04.672799 6019 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"ices.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:19:06.669529 6159 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to 
start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: cert\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hos
tIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.438673 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:08Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.444843 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" Jan 22 05:19:08 crc kubenswrapper[4814]: W0122 05:19:08.463452 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod78f0ef15_ba39_4f8f_b3df_7fb6671e7a79.slice/crio-a7174fd7696580dfa286b9508a8d5b5d694507c0aeb0613027df51e66afbee40 WatchSource:0}: Error finding container a7174fd7696580dfa286b9508a8d5b5d694507c0aeb0613027df51e66afbee40: Status 404 returned error can't find the container with id a7174fd7696580dfa286b9508a8d5b5d694507c0aeb0613027df51e66afbee40 Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.523123 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.523238 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.523263 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.523297 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.523571 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:08Z","lastTransitionTime":"2026-01-22T05:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.626408 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.626441 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.626452 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.626467 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.626478 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:08Z","lastTransitionTime":"2026-01-22T05:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.660738 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/1.log" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.671195 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" event={"ID":"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79","Type":"ContainerStarted","Data":"a7174fd7696580dfa286b9508a8d5b5d694507c0aeb0613027df51e66afbee40"} Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.729657 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.729699 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.729710 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.729749 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.729761 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:08Z","lastTransitionTime":"2026-01-22T05:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.832737 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.832825 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.832849 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.832877 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.832900 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:08Z","lastTransitionTime":"2026-01-22T05:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.934999 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.935053 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.935068 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.935094 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.935109 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:08Z","lastTransitionTime":"2026-01-22T05:19:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:08 crc kubenswrapper[4814]: I0122 05:19:08.992409 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:19:08 crc kubenswrapper[4814]: E0122 05:19:08.992656 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:19:24.992600138 +0000 UTC m=+51.076088363 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.038202 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.038295 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.038355 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.038391 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.038453 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:09Z","lastTransitionTime":"2026-01-22T05:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.093992 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.094083 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.094204 4814 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.094211 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.094292 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:25.094273017 +0000 UTC m=+51.177761222 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.094352 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.094438 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.094460 4814 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.094367 4814 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.094570 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:25.094548795 +0000 UTC m=+51.178037050 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.094588 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:25.094581176 +0000 UTC m=+51.178069481 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.141223 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.141285 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.141305 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.141332 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.141350 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:09Z","lastTransitionTime":"2026-01-22T05:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.195394 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.195690 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.195752 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.195776 4814 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.195880 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:25.195853404 +0000 UTC m=+51.279341659 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.244101 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.244178 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.244196 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.244221 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.244239 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:09Z","lastTransitionTime":"2026-01-22T05:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.288139 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 01:58:19.975901585 +0000 UTC
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.300168 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-nmwv2"]
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.300881 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2"
Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.300975 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.322060 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.340657 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.342941 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.342986 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.343146 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.343302 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.347105 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.347447 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.347464 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.347487 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.347504 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:09Z","lastTransitionTime":"2026-01-22T05:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.364072 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.382162 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.398295 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vf9vr\" (UniqueName: \"kubernetes.io/projected/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-kube-api-access-vf9vr\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.398359 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.402053 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.416914 4814 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.433590 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.447022 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.449817 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.449846 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.449857 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.449873 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.449883 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:09Z","lastTransitionTime":"2026-01-22T05:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.464455 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.474858 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.494389 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.499567 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vf9vr\" (UniqueName: \"kubernetes.io/projected/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-kube-api-access-vf9vr\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.499609 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 
05:19:09.499759 4814 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:19:09 crc kubenswrapper[4814]: E0122 05:19:09.499809 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs podName:33d4bb42-6c3b-4a42-bf7b-bb9a780f7873 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:09.999794511 +0000 UTC m=+36.083282736 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs") pod "network-metrics-daemon-nmwv2" (UID: "33d4bb42-6c3b-4a42-bf7b-bb9a780f7873") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.516658 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27
dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\
":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.528489 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vf9vr\" (UniqueName: \"kubernetes.io/projected/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-kube-api-access-vf9vr\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.547826 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebae
ac266f62f528e0d533059d02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:04Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670536 6019 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.670764 6019 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670849 6019 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.672609 6019 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 05:19:04.672667 6019 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 05:19:04.672689 6019 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 05:19:04.672702 6019 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 05:19:04.672725 6019 factory.go:656] Stopping watch factory\\\\nI0122 05:19:04.672754 6019 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 05:19:04.672768 6019 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 05:19:04.672787 6019 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 05:19:04.672799 6019 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"ices.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:19:06.669529 6159 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to 
start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: cert\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hos
tIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.553014 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.553049 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.553059 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.553073 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.553085 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:09Z","lastTransitionTime":"2026-01-22T05:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.565176 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.580008 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.590885 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.655176 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.655212 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.655226 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.655245 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.655258 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:09Z","lastTransitionTime":"2026-01-22T05:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.675422 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" event={"ID":"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79","Type":"ContainerStarted","Data":"993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9"} Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.675472 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" event={"ID":"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79","Type":"ContainerStarted","Data":"4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf"} Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.692809 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.717855 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.733937 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.749352 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-l
ib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.758889 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.758934 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.758952 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.758975 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.758991 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:09Z","lastTransitionTime":"2026-01-22T05:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.770042 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.786610 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.803022 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.815141 4814 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.834324 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name
\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.850367 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.860987 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.862132 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.862209 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.862235 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.862284 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.862318 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:09Z","lastTransitionTime":"2026-01-22T05:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.874309 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.889242 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.902117 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.924356 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.953902 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:04Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670536 6019 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.670764 6019 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670849 6019 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.672609 6019 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 05:19:04.672667 6019 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 05:19:04.672689 6019 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 05:19:04.672702 6019 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 05:19:04.672725 6019 factory.go:656] Stopping watch factory\\\\nI0122 05:19:04.672754 6019 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 05:19:04.672768 6019 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 05:19:04.672787 6019 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 05:19:04.672799 6019 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"ices.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", 
ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:19:06.669529 6159 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: cert\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dad
ab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:09Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.964860 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.964961 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.964978 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.965001 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.965018 
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.964860 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.964961 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.964978 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.965001 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:09 crc kubenswrapper[4814]: I0122 05:19:09.965018 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:09Z","lastTransitionTime":"2026-01-22T05:19:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:10 crc kubenswrapper[4814]: I0122 05:19:10.012854 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2"
Jan 22 05:19:10 crc kubenswrapper[4814]: E0122 05:19:10.013031 4814 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 22 05:19:10 crc kubenswrapper[4814]: E0122 05:19:10.013141 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs podName:33d4bb42-6c3b-4a42-bf7b-bb9a780f7873 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:11.01311458 +0000 UTC m=+37.096602875 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs") pod "network-metrics-daemon-nmwv2" (UID: "33d4bb42-6c3b-4a42-bf7b-bb9a780f7873") : object "openshift-multus"/"metrics-daemon-secret" not registered
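The "not registered" error is not a missing Secret per se: the kubelet resolves secret volumes through a watch-based manager that only serves objects previously registered on behalf of a pod, and lookups fail closed until registration catches up. A generic sketch of that register-then-get contract (the type and method names here are illustrative, not the kubelet's actual API):

package main

import "fmt"

// registry mimics a watch-based object manager: Get refuses to serve
// anything no pod has registered yet (fail closed), mirroring the
// "object not registered" errors in the log.
type registry struct {
	objects map[string][]byte
}

func (r *registry) Register(key string, data []byte) { r.objects[key] = data }

func (r *registry) Get(key string) ([]byte, error) {
	data, ok := r.objects[key]
	if !ok {
		return nil, fmt.Errorf("object %q not registered", key)
	}
	return data, nil
}

func main() {
	r := &registry{objects: map[string][]byte{}}
	if _, err := r.Get("openshift-multus/metrics-daemon-secret"); err != nil {
		fmt.Println("MountVolume.SetUp failed:", err) // first attempts fail
	}
	r.Register("openshift-multus/metrics-daemon-secret", []byte("tls"))
	if data, err := r.Get("openshift-multus/metrics-daemon-secret"); err == nil {
		fmt.Println("mounted", len(data), "bytes") // succeeds once registered
	}
}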
Jan 22 05:19:10 crc kubenswrapper[4814]: I0122 05:19:10.288887 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 05:32:45.293397766 +0000 UTC
Jan 22 05:19:10 crc kubenswrapper[4814]: I0122 05:19:10.342912 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:19:10 crc kubenswrapper[4814]: E0122 05:19:10.343104 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:19:11 crc kubenswrapper[4814]: I0122 05:19:11.024366 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2"
Jan 22 05:19:11 crc kubenswrapper[4814]: E0122 05:19:11.024567 4814 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 22 05:19:11 crc kubenswrapper[4814]: E0122 05:19:11.024682 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs podName:33d4bb42-6c3b-4a42-bf7b-bb9a780f7873 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:13.024657322 +0000 UTC m=+39.108145567 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs") pod "network-metrics-daemon-nmwv2" (UID: "33d4bb42-6c3b-4a42-bf7b-bb9a780f7873") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 22 05:19:11 crc kubenswrapper[4814]: I0122 05:19:11.289276 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 12:52:52.366822646 +0000 UTC
Jan 22 05:19:11 crc kubenswrapper[4814]: I0122 05:19:11.343651 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:19:11 crc kubenswrapper[4814]: I0122 05:19:11.343743 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2"
Jan 22 05:19:11 crc kubenswrapper[4814]: E0122 05:19:11.343815 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:19:11 crc kubenswrapper[4814]: E0122 05:19:11.343904 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873"
Jan 22 05:19:11 crc kubenswrapper[4814]: I0122 05:19:11.343988 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:19:11 crc kubenswrapper[4814]: E0122 05:19:11.344069 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:19:12 crc kubenswrapper[4814]: I0122 05:19:12.290157 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 02:56:40.701500596 +0000 UTC
Jan 22 05:19:13 crc kubenswrapper[4814]: I0122 05:19:13.044906 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2"
Jan 22 05:19:13 crc kubenswrapper[4814]: E0122 05:19:13.045406 4814 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 22 05:19:13 crc kubenswrapper[4814]: E0122 05:19:13.045696 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs podName:33d4bb42-6c3b-4a42-bf7b-bb9a780f7873 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:17.045618371 +0000 UTC m=+43.129106626 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs") pod "network-metrics-daemon-nmwv2" (UID: "33d4bb42-6c3b-4a42-bf7b-bb9a780f7873") : object "openshift-multus"/"metrics-daemon-secret" not registered
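The three MountVolume failures above show the volume manager's per-operation exponential backoff: durationBeforeRetry doubles from 1s to 2s to 4s across attempts. A minimal sketch of that doubling, assuming a 1s initial delay and a 2x factor as observed here (the cap value is illustrative, not the kubelet's configured maximum):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Doubling backoff as observed in the log: 1s, 2s, 4s, ...
	const factor = 2
	maxDelay := 2 * time.Minute // illustrative cap
	delay := time.Second
	for attempt := 1; attempt <= 5; attempt++ {
		fmt.Printf("attempt %d: no retries permitted for %s\n", attempt, delay)
		delay *= factor
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}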
Jan 22 05:19:13 crc kubenswrapper[4814]: I0122 05:19:13.291391 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 14:08:52.199902418 +0000 UTC
Has your network provider started?"} Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.292563 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 05:35:47.816262354 +0000 UTC Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.321061 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.321140 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.321162 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.321193 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.321216 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:14Z","lastTransitionTime":"2026-01-22T05:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.343702 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:14 crc kubenswrapper[4814]: E0122 05:19:14.343888 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.365284 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static
-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.386425 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.403103 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.419436 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.425028 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.425324 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.425540 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.425744 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.425950 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:14Z","lastTransitionTime":"2026-01-22T05:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.438738 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.455183 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.478141 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.509167 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://597c948d3de2a751878f21b822ca6ddb2c01451566cb91484ab23e6b608126b6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:04Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670536 6019 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.670764 6019 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0122 05:19:04.670849 6019 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI0122 05:19:04.672609 6019 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0122 05:19:04.672667 6019 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0122 05:19:04.672689 6019 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0122 05:19:04.672702 6019 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0122 05:19:04.672725 6019 factory.go:656] Stopping watch factory\\\\nI0122 05:19:04.672754 6019 handler.go:208] Removed *v1.Node event handler 7\\\\nI0122 05:19:04.672768 6019 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0122 05:19:04.672787 6019 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0122 05:19:04.672799 6019 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"ices.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", 
ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:19:06.669529 6159 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: cert\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dad
ab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.528901 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.529917 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.529971 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.529989 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.530012 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.530031 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:14Z","lastTransitionTime":"2026-01-22T05:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.564755 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.584553 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.602942 4814 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.622871 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.640339 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.640438 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.640459 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.640485 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.640503 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:14Z","lastTransitionTime":"2026-01-22T05:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.645686 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.663997 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.683974 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:14Z is after 2025-08-24T17:21:41Z" Jan 22 
05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.743845 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.743931 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.743955 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.743988 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.744014 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:14Z","lastTransitionTime":"2026-01-22T05:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.847335 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.847396 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.847418 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.847448 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.847472 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:14Z","lastTransitionTime":"2026-01-22T05:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.951282 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.951353 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.951369 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.951399 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:14 crc kubenswrapper[4814]: I0122 05:19:14.951416 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:14Z","lastTransitionTime":"2026-01-22T05:19:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.053955 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.054024 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.054043 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.054069 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.054087 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:15Z","lastTransitionTime":"2026-01-22T05:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.157556 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.157655 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.157673 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.157698 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.157715 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:15Z","lastTransitionTime":"2026-01-22T05:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.260104 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.260229 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.260256 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.260287 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.260310 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:15Z","lastTransitionTime":"2026-01-22T05:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.293404 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 18:04:59.563488036 +0000 UTC Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.343135 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.343187 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.343158 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:15 crc kubenswrapper[4814]: E0122 05:19:15.343359 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:15 crc kubenswrapper[4814]: E0122 05:19:15.343610 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:15 crc kubenswrapper[4814]: E0122 05:19:15.343818 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.363906 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.363965 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.363984 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.364011 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.364032 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:15Z","lastTransitionTime":"2026-01-22T05:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.467750 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.468185 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.468203 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.468225 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.468244 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:15Z","lastTransitionTime":"2026-01-22T05:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.571909 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.571997 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.572018 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.572041 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.572060 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:15Z","lastTransitionTime":"2026-01-22T05:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.675177 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.675249 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.675266 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.675292 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.675314 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:15Z","lastTransitionTime":"2026-01-22T05:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.779392 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.779495 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.779516 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.779544 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.779561 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:15Z","lastTransitionTime":"2026-01-22T05:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.882194 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.882255 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.882275 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.882302 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.882319 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:15Z","lastTransitionTime":"2026-01-22T05:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.985810 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.985920 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.985938 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.985964 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:15 crc kubenswrapper[4814]: I0122 05:19:15.985982 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:15Z","lastTransitionTime":"2026-01-22T05:19:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.088310 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.088376 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.088393 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.088416 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.088435 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:16Z","lastTransitionTime":"2026-01-22T05:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.190950 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.190981 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.190991 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.191009 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.191019 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:16Z","lastTransitionTime":"2026-01-22T05:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.293585 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 08:12:32.144293521 +0000 UTC Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.298485 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.298520 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.298536 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.298560 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.298576 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:16Z","lastTransitionTime":"2026-01-22T05:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.343667 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:16 crc kubenswrapper[4814]: E0122 05:19:16.343858 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.401359 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.401423 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.401444 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.401473 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.401491 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:16Z","lastTransitionTime":"2026-01-22T05:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.504454 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.504527 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.504549 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.504578 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.504604 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:16Z","lastTransitionTime":"2026-01-22T05:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.606956 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.607004 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.607019 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.607039 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.607056 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:16Z","lastTransitionTime":"2026-01-22T05:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.710200 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.710250 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.710266 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.710292 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.710312 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:16Z","lastTransitionTime":"2026-01-22T05:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.812982 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.813031 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.813048 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.813079 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.813098 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:16Z","lastTransitionTime":"2026-01-22T05:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.916264 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.916322 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.916340 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.916364 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:16 crc kubenswrapper[4814]: I0122 05:19:16.916383 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:16Z","lastTransitionTime":"2026-01-22T05:19:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.019215 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.019296 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.019319 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.019348 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.019369 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.098396 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2"
Jan 22 05:19:17 crc kubenswrapper[4814]: E0122 05:19:17.098755 4814 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 22 05:19:17 crc kubenswrapper[4814]: E0122 05:19:17.098925 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs podName:33d4bb42-6c3b-4a42-bf7b-bb9a780f7873 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:25.098892771 +0000 UTC m=+51.182381016 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs") pod "network-metrics-daemon-nmwv2" (UID: "33d4bb42-6c3b-4a42-bf7b-bb9a780f7873") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.121891 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.121958 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.121981 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.122027 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.122049 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.224874 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.224957 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.224976 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.225005 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.225026 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.294806 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 23:55:29.767686505 +0000 UTC
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.328550 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.328621 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.328700 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.328736 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.328762 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.342978 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.343060 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2"
Jan 22 05:19:17 crc kubenswrapper[4814]: E0122 05:19:17.343168 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.342994 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:19:17 crc kubenswrapper[4814]: E0122 05:19:17.343364 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873"
Jan 22 05:19:17 crc kubenswrapper[4814]: E0122 05:19:17.343541 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.431901 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.431974 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.431997 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.432028 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.432052 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.535570 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.535619 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.535661 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.535685 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.535703 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.639375 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.639441 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.639463 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.639492 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.639514 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.742122 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.742181 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.742198 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.742219 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.742235 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.838491 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.838590 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.838609 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.838681 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.838704 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:17 crc kubenswrapper[4814]: E0122 05:19:17.860965 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:17Z is after 2025-08-24T17:21:41Z"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.866874 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.866935 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.866957 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.866986 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.867034 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:17 crc kubenswrapper[4814]: E0122 05:19:17.888938 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:17Z is after 2025-08-24T17:21:41Z"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.893681 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.893740 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.893760 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.893786 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.893805 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:17 crc kubenswrapper[4814]: E0122 05:19:17.912777 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:17Z is after 2025-08-24T17:21:41Z"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.917837 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.917885 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.917904 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.917928 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.917946 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:17 crc kubenswrapper[4814]: E0122 05:19:17.937680 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.942031 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.942199 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.942344 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.942479 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.942605 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:17 crc kubenswrapper[4814]: E0122 05:19:17.964068 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:17Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:17 crc kubenswrapper[4814]: E0122 05:19:17.964697 4814 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.966928 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.967000 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.967017 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.967043 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:17 crc kubenswrapper[4814]: I0122 05:19:17.967062 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:17Z","lastTransitionTime":"2026-01-22T05:19:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.070155 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.070202 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.070218 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.070242 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.070260 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:18Z","lastTransitionTime":"2026-01-22T05:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.173507 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.173558 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.173576 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.173600 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.173617 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:18Z","lastTransitionTime":"2026-01-22T05:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.278156 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.278313 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.278343 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.278415 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.278440 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:18Z","lastTransitionTime":"2026-01-22T05:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.295482 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 17:01:51.968900148 +0000 UTC Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.343552 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:18 crc kubenswrapper[4814]: E0122 05:19:18.343712 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.381923 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.382201 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.382353 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.382503 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.382620 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:18Z","lastTransitionTime":"2026-01-22T05:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.393215 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.394384 4814 scope.go:117] "RemoveContainer" containerID="94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.414842 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.
168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.437964 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/
multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.459059 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\
\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.482948 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.485996 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.486048 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.486065 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.486087 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.486104 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:18Z","lastTransitionTime":"2026-01-22T05:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.527768 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.554503 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.580608 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.588451 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.588488 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.588499 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.588517 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.588529 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:18Z","lastTransitionTime":"2026-01-22T05:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.592185 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.604651 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.616125 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.624318 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.633383 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.641042 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.652348 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.667808 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"ices.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:19:06.669529 6159 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: cert\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.677592 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:18Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.691232 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.691264 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.691274 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.691291 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.691302 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:18Z","lastTransitionTime":"2026-01-22T05:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.793197 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.793253 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.793271 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.793297 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.793315 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:18Z","lastTransitionTime":"2026-01-22T05:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.895366 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.895431 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.895449 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.895475 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.895492 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:18Z","lastTransitionTime":"2026-01-22T05:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.998119 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.998164 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.998182 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.998207 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:18 crc kubenswrapper[4814]: I0122 05:19:18.998224 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:18Z","lastTransitionTime":"2026-01-22T05:19:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.100289 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.100326 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.100335 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.100358 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.100369 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:19Z","lastTransitionTime":"2026-01-22T05:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.203856 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.203942 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.203961 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.203985 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.204033 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:19Z","lastTransitionTime":"2026-01-22T05:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.296449 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 14:03:35.00754164 +0000 UTC Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.306496 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.306549 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.306577 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.306603 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.306619 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:19Z","lastTransitionTime":"2026-01-22T05:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.342905 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.342926 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:19 crc kubenswrapper[4814]: E0122 05:19:19.343086 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.342932 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:19 crc kubenswrapper[4814]: E0122 05:19:19.343209 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:19 crc kubenswrapper[4814]: E0122 05:19:19.343230 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.409165 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.409194 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.409202 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.409241 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.409257 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:19Z","lastTransitionTime":"2026-01-22T05:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.512429 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.512460 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.512467 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.512480 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.512489 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:19Z","lastTransitionTime":"2026-01-22T05:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.615930 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.615992 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.616012 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.616035 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.616052 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:19Z","lastTransitionTime":"2026-01-22T05:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.716561 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/1.log" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.718576 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.718691 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.718886 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.718908 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.718926 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:19Z","lastTransitionTime":"2026-01-22T05:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.722319 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerStarted","Data":"fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b"} Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.722869 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.747247 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mo
untPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.773485 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/st
atic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.793163 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.813130 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.822593 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.822666 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.822684 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.822709 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.822726 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:19Z","lastTransitionTime":"2026-01-22T05:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.834270 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.852880 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.869940 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.887282 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7
73257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.908159 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluste
r-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 
05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.924902 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.924957 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.924977 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.925004 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.925020 4814 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:19Z","lastTransitionTime":"2026-01-22T05:19:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.929357 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.945019 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.963031 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:19 crc kubenswrapper[4814]: I0122 05:19:19.978790 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.001875 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:19Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.027352 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.027437 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:20 crc 
kubenswrapper[4814]: I0122 05:19:20.027460 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.027490 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.027510 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:20Z","lastTransitionTime":"2026-01-22T05:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.033473 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c0
8d0d4caa4fc4d78c09330c8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"ices.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:19:06.669529 6159 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: 
cert\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"c
ontainerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.052159 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.130946 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.131014 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.131032 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.131055 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.131073 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:20Z","lastTransitionTime":"2026-01-22T05:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.234711 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.234751 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.234763 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.234781 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.234975 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:20Z","lastTransitionTime":"2026-01-22T05:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.296781 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 01:42:01.789923845 +0000 UTC Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.337746 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.337815 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.337841 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.337874 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.337898 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:20Z","lastTransitionTime":"2026-01-22T05:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.343153 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:20 crc kubenswrapper[4814]: E0122 05:19:20.343321 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.441694 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.441749 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.441775 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.441804 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.441828 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:20Z","lastTransitionTime":"2026-01-22T05:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.545012 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.545082 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.545101 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.545126 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.545144 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:20Z","lastTransitionTime":"2026-01-22T05:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.648521 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.648578 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.648594 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.648650 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.648672 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:20Z","lastTransitionTime":"2026-01-22T05:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.730007 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/2.log" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.731270 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/1.log" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.735982 4814 generic.go:334] "Generic (PLEG): container finished" podID="55649399-9fd6-4e9a-b249-ce01b498c626" containerID="fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b" exitCode=1 Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.736049 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b"} Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.736111 4814 scope.go:117] "RemoveContainer" containerID="94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.737205 4814 scope.go:117] "RemoveContainer" containerID="fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b" Jan 22 05:19:20 crc kubenswrapper[4814]: E0122 05:19:20.737449 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.754674 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.754877 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.755095 4814 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.755334 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.755521 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:20Z","lastTransitionTime":"2026-01-22T05:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.764182 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\
"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.786793 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.813837 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.839396 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.857992 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.861157 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.861233 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.861257 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.861287 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.861309 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:20Z","lastTransitionTime":"2026-01-22T05:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.875503 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.894611 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.911966 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"
},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.926203 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"n
ame\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.947002 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.971703 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.975095 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.975160 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.975178 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.975204 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.975226 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:20Z","lastTransitionTime":"2026-01-22T05:19:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:20 crc kubenswrapper[4814]: I0122 05:19:20.987412 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.001296 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:20Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.024133 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.057575 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a6198647f5c7e1b956927b0cab08b46147ebaeac266f62f528e0d533059d02\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:07Z\\\",\\\"message\\\":\\\"ices.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-scheduler/scheduler_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-scheduler/scheduler\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.169\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF0122 05:19:06.669529 6159 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: cert\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:19Z\\\",\\\"message\\\":\\\" 6343 ovn.go:134] Ensuring 
zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0122 05:19:19.547708 6343 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-nmwv2] creating logical port openshift-multus_network-metrics-daemon-nmwv2 for pod on switch crc\\\\nI0122 05:19:19.547710 6343 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0122 05:19:19.547721 6343 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0122 05:19:19.547731 6343 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI0122 05:19:19.547691 6343 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:18Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1
d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.076098 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.078082 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.078144 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.078161 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.078185 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.078204 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:21Z","lastTransitionTime":"2026-01-22T05:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.181312 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.181369 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.181387 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.181411 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.181428 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:21Z","lastTransitionTime":"2026-01-22T05:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.284892 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.284934 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.284951 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.284973 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.284990 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:21Z","lastTransitionTime":"2026-01-22T05:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.297934 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 17:29:20.073147887 +0000 UTC Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.343021 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.343092 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:21 crc kubenswrapper[4814]: E0122 05:19:21.343205 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.343479 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:21 crc kubenswrapper[4814]: E0122 05:19:21.343497 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:21 crc kubenswrapper[4814]: E0122 05:19:21.344032 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.388321 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.388406 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.388425 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.388450 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.388467 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:21Z","lastTransitionTime":"2026-01-22T05:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.492226 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.492325 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.492344 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.492370 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.492388 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:21Z","lastTransitionTime":"2026-01-22T05:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.595393 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.595471 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.595513 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.595546 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.595567 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:21Z","lastTransitionTime":"2026-01-22T05:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.698403 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.698470 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.698493 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.698527 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.698550 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:21Z","lastTransitionTime":"2026-01-22T05:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.743582 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/2.log" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.749891 4814 scope.go:117] "RemoveContainer" containerID="fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b" Jan 22 05:19:21 crc kubenswrapper[4814]: E0122 05:19:21.750149 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.774588 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.793676 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.801745 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.801818 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.801844 4814 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.801876 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.801899 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:21Z","lastTransitionTime":"2026-01-22T05:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.812019 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.828279 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.844461 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.862767 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.887866 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:19Z\\\",\\\"message\\\":\\\" 6343 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0122 05:19:19.547708 6343 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-nmwv2] creating logical port openshift-multus_network-metrics-daemon-nmwv2 for pod on switch crc\\\\nI0122 05:19:19.547710 6343 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0122 05:19:19.547721 6343 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0122 05:19:19.547731 6343 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI0122 05:19:19.547691 6343 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.902114 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.904379 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.904573 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.904753 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.904936 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.905346 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:21Z","lastTransitionTime":"2026-01-22T05:19:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.917520 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.936772 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.957471 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.975229 4814 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:21 crc kubenswrapper[4814]: I0122 05:19:21.995277 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:21Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.008084 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.008142 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.008159 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.008183 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.008200 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:22Z","lastTransitionTime":"2026-01-22T05:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.014567 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.030415 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.044944 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"
},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:22Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.111042 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.111280 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.111368 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.111487 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.111576 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:22Z","lastTransitionTime":"2026-01-22T05:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.213874 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.214056 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.214140 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.214233 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.214321 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:22Z","lastTransitionTime":"2026-01-22T05:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.299199 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 16:36:44.829866393 +0000 UTC Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.317449 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.317737 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.317941 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.318112 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.318257 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:22Z","lastTransitionTime":"2026-01-22T05:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.343069 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:22 crc kubenswrapper[4814]: E0122 05:19:22.343383 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.421019 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.421101 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.421122 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.421157 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.421179 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:22Z","lastTransitionTime":"2026-01-22T05:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.525219 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.525279 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.525296 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.525321 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.525340 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:22Z","lastTransitionTime":"2026-01-22T05:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.628723 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.628793 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.628810 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.628836 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.628854 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:22Z","lastTransitionTime":"2026-01-22T05:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.732500 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.732560 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.732594 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.732666 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.732716 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:22Z","lastTransitionTime":"2026-01-22T05:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.836271 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.836327 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.836344 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.836367 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.836384 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:22Z","lastTransitionTime":"2026-01-22T05:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.939420 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.939472 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.939488 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.939515 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:22 crc kubenswrapper[4814]: I0122 05:19:22.939531 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:22Z","lastTransitionTime":"2026-01-22T05:19:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.042591 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.042676 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.042690 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.042710 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.042723 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:23Z","lastTransitionTime":"2026-01-22T05:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.145258 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.145324 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.145343 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.145368 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.145385 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:23Z","lastTransitionTime":"2026-01-22T05:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.249099 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.249200 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.249228 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.249260 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.249283 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:23Z","lastTransitionTime":"2026-01-22T05:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.299660 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 02:02:55.36601732 +0000 UTC Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.343208 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.343297 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.343220 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:23 crc kubenswrapper[4814]: E0122 05:19:23.343421 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:23 crc kubenswrapper[4814]: E0122 05:19:23.343537 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:23 crc kubenswrapper[4814]: E0122 05:19:23.343711 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.351814 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.351865 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.351882 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.351903 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.351920 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:23Z","lastTransitionTime":"2026-01-22T05:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.455055 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.455149 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.455209 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.455239 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.455296 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:23Z","lastTransitionTime":"2026-01-22T05:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.558413 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.558505 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.558527 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.558579 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.558598 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:23Z","lastTransitionTime":"2026-01-22T05:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.661254 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.661310 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.661328 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.661353 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.661372 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:23Z","lastTransitionTime":"2026-01-22T05:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.764761 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.764821 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.764843 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.764865 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.764882 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:23Z","lastTransitionTime":"2026-01-22T05:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.868266 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.868341 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.868364 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.868393 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.868414 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:23Z","lastTransitionTime":"2026-01-22T05:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.971110 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.971152 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.971160 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.971176 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:23 crc kubenswrapper[4814]: I0122 05:19:23.971186 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:23Z","lastTransitionTime":"2026-01-22T05:19:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.077308 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.078246 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.078390 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.078565 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.078727 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:24Z","lastTransitionTime":"2026-01-22T05:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.182073 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.182165 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.182191 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.182226 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.182264 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:24Z","lastTransitionTime":"2026-01-22T05:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.285239 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.285305 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.285323 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.285348 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.285365 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:24Z","lastTransitionTime":"2026-01-22T05:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.300775 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 23:23:37.498709039 +0000 UTC Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.343737 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:24 crc kubenswrapper[4814]: E0122 05:19:24.344076 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.368229 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"star
tedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.388997 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.389050 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.389066 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.389092 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.389108 4814 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:24Z","lastTransitionTime":"2026-01-22T05:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.389988 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.408148 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.423427 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.441981 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.459084 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.483609 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.492859 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.492947 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:24 crc 
kubenswrapper[4814]: I0122 05:19:24.493014 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.493039 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.493055 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:24Z","lastTransitionTime":"2026-01-22T05:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.514663 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:19Z\\\",\\\"message\\\":\\\" 6343 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0122 05:19:19.547708 6343 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-nmwv2] creating logical port openshift-multus_network-metrics-daemon-nmwv2 for pod on switch crc\\\\nI0122 05:19:19.547710 6343 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0122 05:19:19.547721 6343 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0122 05:19:19.547731 6343 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI0122 05:19:19.547691 6343 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z"
Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.534749 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.558971 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z"
Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.582345 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z"
Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.594893 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z"
Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.594977 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.595000 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.595012 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.595029 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.595041 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:24Z","lastTransitionTime":"2026-01-22T05:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.608596 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.619223 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.629746 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.639198 4814 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-22T05:19:24Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.697727 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.697788 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.697805 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.697830 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.697851 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:24Z","lastTransitionTime":"2026-01-22T05:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.800269 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.800325 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.800341 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.800364 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.800383 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:24Z","lastTransitionTime":"2026-01-22T05:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
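
[editor's note] Each of the failed status updates above is a strategic merge patch. The "$setElementOrder/conditions" directive pins the ordering of the conditions list, while the entries in "conditions" are merged into the existing list by their "type" key, so only changed conditions travel over the wire. A minimal sketch of that patch shape using only encoding/json; field names are taken from the log, but this is not the kubelet's patch-generation code (which uses k8s.io/apimachinery's strategicpatch package):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        patch := map[string]any{
            "status": map[string]any{
                // Directive: keep conditions in this order after the merge.
                "$setElementOrder/conditions": []map[string]string{
                    {"type": "PodReadyToStartContainers"},
                    {"type": "Initialized"},
                    {"type": "Ready"},
                    {"type": "ContainersReady"},
                    {"type": "PodScheduled"},
                },
                // Only changed entries are sent; each is matched by "type".
                "conditions": []map[string]any{
                    {"type": "Ready", "status": "True",
                        "lastTransitionTime": "2026-01-22T05:18:55Z"},
                },
            },
        }
        out, _ := json.MarshalIndent(patch, "", "  ")
        fmt.Println(string(out))
    }
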
Has your network provider started?"} Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.903348 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.903406 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.903425 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.903449 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:24 crc kubenswrapper[4814]: I0122 05:19:24.903465 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:24Z","lastTransitionTime":"2026-01-22T05:19:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.006455 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.006517 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.006539 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.006567 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.006589 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:25Z","lastTransitionTime":"2026-01-22T05:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.088300 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.088575 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:19:57.088535012 +0000 UTC m=+83.172023287 (durationBeforeRetry 32s). 
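
[editor's note] The "No retries permitted until ... (durationBeforeRetry 32s)" wording comes from the volume manager's per-operation exponential backoff: each failed mount/unmount attempt roughly doubles the wait, which is why this log shows 16s and 32s delays for different volumes. A doubling-backoff sketch under that assumption; the base and cap constants here are illustrative, not the kubelet's actual values:

    package main

    import (
        "fmt"
        "time"
    )

    // nextDelay doubles the retry delay per failure, starting at base and
    // capping at max -- the pattern behind durationBeforeRetry 16s, 32s, ...
    func nextDelay(failures int, base, max time.Duration) time.Duration {
        d := base
        for i := 0; i < failures; i++ {
            d *= 2
            if d > max {
                return max
            }
        }
        return d
    }

    func main() {
        for f := 0; f <= 6; f++ {
            fmt.Printf("failure %d -> wait %s\n", f, nextDelay(f, 500*time.Millisecond, 2*time.Minute))
        }
    }
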
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.109402 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.109459 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.109477 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.109500 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.109516 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:25Z","lastTransitionTime":"2026-01-22T05:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.190346 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.190422 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.190473 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.190538 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.190730 4814 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object 
"openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.190805 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:57.190783179 +0000 UTC m=+83.274271434 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.190841 4814 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.190894 4814 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.190950 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:57.190920153 +0000 UTC m=+83.274408408 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.190982 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs podName:33d4bb42-6c3b-4a42-bf7b-bb9a780f7873 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:41.190968905 +0000 UTC m=+67.274457150 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs") pod "network-metrics-daemon-nmwv2" (UID: "33d4bb42-6c3b-4a42-bf7b-bb9a780f7873") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.191099 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.191122 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.191144 4814 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.191190 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:57.19117723 +0000 UTC m=+83.274665475 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.213355 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.213446 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.213467 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.213519 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.213538 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:25Z","lastTransitionTime":"2026-01-22T05:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
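
[editor's note] The kube-api-access-* volumes failing above are projected volumes: one mount assembled from the service-account token, the kube-root-ca.crt and openshift-service-ca.crt ConfigMaps, and the namespace file. Until the restarted kubelet's informers have synced those objects, every source reports "not registered", and the per-source errors are aggregated into the bracketed list seen in the log. A minimal sketch of that aggregation; plain strings stand in for apimachinery's errors.NewAggregate:

    package main

    import (
        "errors"
        "fmt"
        "strings"
    )

    // aggregate mimics the "[err1, err2]" rendering used for projected
    // volume failures in the log above. Sketch only.
    func aggregate(errs []error) error {
        if len(errs) == 0 {
            return nil
        }
        msgs := make([]string, len(errs))
        for i, e := range errs {
            msgs[i] = e.Error()
        }
        return errors.New("[" + strings.Join(msgs, ", ") + "]")
    }

    func main() {
        err := aggregate([]error{
            fmt.Errorf(`object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered`),
            fmt.Errorf(`object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered`),
        })
        fmt.Println(err)
    }
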
Has your network provider started?"} Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.291436 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.291730 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.291765 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.291784 4814 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.291869 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:19:57.291842001 +0000 UTC m=+83.375330256 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.301309 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 09:15:32.299005507 +0000 UTC Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.316084 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.316137 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.316155 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.316180 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.316203 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:25Z","lastTransitionTime":"2026-01-22T05:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
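
[editor's note] Note the certificate_manager.go lines: the kubelet-serving expiration stays fixed at 2026-02-24, but the "rotation deadline" changes on every pass (2025-11-13, then 2025-11-30, then 2025-11-22 within three seconds). The client-go certificate manager picks each deadline at a jittered point roughly 70-90% of the way through the certificate's lifetime and re-rolls when the deadline has already passed, which matches what this log shows. A sketch of that computation; the 0.7/0.2 constants reflect my reading of k8s.io/client-go/util/certificate and should be treated as an assumption, as should the NotBefore date:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // rotationDeadline picks a random point 70-90% through the certificate's
    // validity, mirroring (by assumption) client-go's jittered deadline.
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
        total := notAfter.Sub(notBefore)
        jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
        return notBefore.Add(jittered)
    }

    func main() {
        notBefore := time.Date(2025, 2, 24, 5, 53, 3, 0, time.UTC) // assumed issue time
        notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)  // from the log
        for i := 0; i < 3; i++ {
            fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
        }
    }
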
Has your network provider started?"} Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.343504 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.343622 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.343510 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.343746 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.343879 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:25 crc kubenswrapper[4814]: E0122 05:19:25.344081 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.420424 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.420501 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.420518 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.420545 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.420567 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:25Z","lastTransitionTime":"2026-01-22T05:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
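
[editor's note] The recurring setters.go entries carry the node's Ready condition verbatim; its shape is the standard NodeCondition object. A minimal sketch that reproduces the logged JSON with encoding/json, using a struct trimmed to the fields present in the log as a stand-in for k8s.io/api/core/v1.NodeCondition:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // nodeCondition mirrors the fields of the Ready condition logged by
    // setters.go; a trimmed stand-in for the real NodeCondition type.
    type nodeCondition struct {
        Type               string `json:"type"`
        Status             string `json:"status"`
        LastHeartbeatTime  string `json:"lastHeartbeatTime"`
        LastTransitionTime string `json:"lastTransitionTime"`
        Reason             string `json:"reason"`
        Message            string `json:"message"`
    }

    func main() {
        c := nodeCondition{
            Type:               "Ready",
            Status:             "False",
            LastHeartbeatTime:  "2026-01-22T05:19:25Z",
            LastTransitionTime: "2026-01-22T05:19:25Z",
            Reason:             "KubeletNotReady",
            Message: "container runtime network not ready: NetworkReady=false " +
                "reason:NetworkPluginNotReady message:Network plugin returns error: " +
                "no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
                "Has your network provider started?",
        }
        out, _ := json.Marshal(c)
        fmt.Println(string(out))
    }
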
Has your network provider started?"} Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.523722 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.523860 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.523883 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.523907 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.523957 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:25Z","lastTransitionTime":"2026-01-22T05:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.626578 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.626691 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.626718 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.626750 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.626770 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:25Z","lastTransitionTime":"2026-01-22T05:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.730100 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.730186 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.730211 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.730245 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.730263 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:25Z","lastTransitionTime":"2026-01-22T05:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.833757 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.833826 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.833844 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.833874 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.833895 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:25Z","lastTransitionTime":"2026-01-22T05:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.936573 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.936665 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.936705 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.936739 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:25 crc kubenswrapper[4814]: I0122 05:19:25.936763 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:25Z","lastTransitionTime":"2026-01-22T05:19:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.039347 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.039420 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.039443 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.039473 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.039496 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:26Z","lastTransitionTime":"2026-01-22T05:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.142563 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.142675 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.142698 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.142722 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.142743 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:26Z","lastTransitionTime":"2026-01-22T05:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.245944 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.246011 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.246034 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.246068 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.246095 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:26Z","lastTransitionTime":"2026-01-22T05:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.301914 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 04:19:03.566705595 +0000 UTC Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.343507 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:26 crc kubenswrapper[4814]: E0122 05:19:26.343729 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.348544 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.348608 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.348660 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.348691 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.348713 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:26Z","lastTransitionTime":"2026-01-22T05:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.451669 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.451724 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.451741 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.451767 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.451785 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:26Z","lastTransitionTime":"2026-01-22T05:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.554578 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.554650 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.554662 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.554680 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.554693 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:26Z","lastTransitionTime":"2026-01-22T05:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.657319 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.657379 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.657395 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.657419 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.657438 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:26Z","lastTransitionTime":"2026-01-22T05:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.759945 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.759983 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.759994 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.760012 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.760028 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:26Z","lastTransitionTime":"2026-01-22T05:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.864025 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.864061 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.864071 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.864087 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.864098 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:26Z","lastTransitionTime":"2026-01-22T05:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.967500 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.967554 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.967572 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.967598 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:26 crc kubenswrapper[4814]: I0122 05:19:26.967616 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:26Z","lastTransitionTime":"2026-01-22T05:19:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.070489 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.070549 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.070571 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.070597 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.070616 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:27Z","lastTransitionTime":"2026-01-22T05:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.173487 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.173521 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.173538 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.173558 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.173573 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:27Z","lastTransitionTime":"2026-01-22T05:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.276880 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.276957 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.276981 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.277013 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.277039 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:27Z","lastTransitionTime":"2026-01-22T05:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.302415 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 11:48:04.334016515 +0000 UTC
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.342951 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.343118 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:19:27 crc kubenswrapper[4814]: E0122 05:19:27.343247 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.343354 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:19:27 crc kubenswrapper[4814]: E0122 05:19:27.343428 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:19:27 crc kubenswrapper[4814]: E0122 05:19:27.343696 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.380950 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.381008 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.381023 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.381044 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.381059 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:27Z","lastTransitionTime":"2026-01-22T05:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.483820 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.483874 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.483890 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.483911 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.483928 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:27Z","lastTransitionTime":"2026-01-22T05:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.586531 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.586556 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.586563 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.586790 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.586801 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:27Z","lastTransitionTime":"2026-01-22T05:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.689261 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.689308 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.689324 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.689348 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.689364 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:27Z","lastTransitionTime":"2026-01-22T05:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.791945 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.792009 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.792034 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.792062 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.792086 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:27Z","lastTransitionTime":"2026-01-22T05:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.894916 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.894974 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.894990 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.895013 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.895032 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:27Z","lastTransitionTime":"2026-01-22T05:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.997865 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.997911 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.997928 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.997953 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:27 crc kubenswrapper[4814]: I0122 05:19:27.997970 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:27Z","lastTransitionTime":"2026-01-22T05:19:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.100708 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.100771 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.100788 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.100812 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.100828 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.114658 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.114706 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.114723 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.114744 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.114759 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:28 crc kubenswrapper[4814]: E0122 05:19:28.130944 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:28Z is after 
2025-08-24T17:21:41Z" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.135270 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.135340 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.135361 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.135388 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.135406 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:28 crc kubenswrapper[4814]: E0122 05:19:28.150395 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:28Z is after 
2025-08-24T17:21:41Z" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.156232 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.156295 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.156342 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.156371 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.156390 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:28 crc kubenswrapper[4814]: E0122 05:19:28.173182 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:28Z is after 
2025-08-24T17:21:41Z" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.178006 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.178055 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.178072 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.178096 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.178113 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:28 crc kubenswrapper[4814]: E0122 05:19:28.196946 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:28Z is after 
2025-08-24T17:21:41Z" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.202266 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.202535 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.202787 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.202953 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.203101 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:28 crc kubenswrapper[4814]: E0122 05:19:28.225910 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:28Z is after 
2025-08-24T17:21:41Z" Jan 22 05:19:28 crc kubenswrapper[4814]: E0122 05:19:28.226491 4814 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.228851 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.228903 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.228924 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.228948 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.228969 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.303529 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 18:54:41.03747615 +0000 UTC Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.332351 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.332440 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.332458 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.332487 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.332504 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.352914 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:28 crc kubenswrapper[4814]: E0122 05:19:28.353864 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.435887 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.435955 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.435972 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.436000 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.436022 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.539025 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.539090 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.539107 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.539132 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.539149 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.641502 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.641542 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.641555 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.641572 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.641583 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.744127 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.744160 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.744170 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.744182 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.744192 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.847865 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.847977 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.848000 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.848063 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.848083 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.950863 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.950900 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.950912 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.950929 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:28 crc kubenswrapper[4814]: I0122 05:19:28.950940 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:28Z","lastTransitionTime":"2026-01-22T05:19:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.054440 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.054530 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.054554 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.054582 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.054602 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:29Z","lastTransitionTime":"2026-01-22T05:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.158404 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.158820 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.159009 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.159151 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.159448 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:29Z","lastTransitionTime":"2026-01-22T05:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.262016 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.262056 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.262068 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.262096 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.262116 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:29Z","lastTransitionTime":"2026-01-22T05:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.304245 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 16:42:11.28295021 +0000 UTC Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.343393 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.343465 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.343388 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:29 crc kubenswrapper[4814]: E0122 05:19:29.343581 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:29 crc kubenswrapper[4814]: E0122 05:19:29.343741 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:29 crc kubenswrapper[4814]: E0122 05:19:29.343818 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.364778 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.364842 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.364860 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.364887 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.364905 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:29Z","lastTransitionTime":"2026-01-22T05:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.467934 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.468289 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.468445 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.468601 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.468790 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:29Z","lastTransitionTime":"2026-01-22T05:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.572001 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.572064 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.572082 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.572107 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.572125 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:29Z","lastTransitionTime":"2026-01-22T05:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.674688 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.674736 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.674751 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.674774 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.674790 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:29Z","lastTransitionTime":"2026-01-22T05:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.715522 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.730895 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.742526 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"nam
e\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\
\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.772923 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d
73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:19Z\\\",\\\"message\\\":\\\" 6343 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0122 05:19:19.547708 6343 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-nmwv2] creating logical port openshift-multus_network-metrics-daemon-nmwv2 for pod on switch crc\\\\nI0122 05:19:19.547710 6343 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0122 05:19:19.547721 6343 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0122 05:19:19.547731 6343 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI0122 05:19:19.547691 6343 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: 
UUIDName:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.777854 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.777916 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.777933 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.777958 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.777976 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:29Z","lastTransitionTime":"2026-01-22T05:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.789893 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.809724 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.826891 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.846687 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.868681 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.880441 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.880483 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.880502 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.880525 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.880541 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:29Z","lastTransitionTime":"2026-01-22T05:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.887946 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.905175 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.923854 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.946882 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-l
ib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.970198 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.983342 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.983403 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.983422 4814 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.983447 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.983464 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:29Z","lastTransitionTime":"2026-01-22T05:19:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.985191 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-conf
ig\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:29 crc kubenswrapper[4814]: I0122 05:19:29.999564 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/v
ar/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:29Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.017834 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:30Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.036127 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:30Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.087031 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.087102 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.087126 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.087156 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.087174 4814 setters.go:603] "Node became not ready" node="crc" 
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.087031 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.087102 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.087126 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.087156 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.087174 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:30Z","lastTransitionTime":"2026-01-22T05:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.190679 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.190730 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.190746 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.190772 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.190788 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:30Z","lastTransitionTime":"2026-01-22T05:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.293877 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.293930 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.293947 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.293970 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.293987 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:30Z","lastTransitionTime":"2026-01-22T05:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.305138 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 03:09:41.946091567 +0000 UTC
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.342807 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:19:30 crc kubenswrapper[4814]: E0122 05:19:30.342964 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.396724 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.396803 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.396827 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.396856 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.396880 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:30Z","lastTransitionTime":"2026-01-22T05:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.499143 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.499190 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.499199 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.499212 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.499223 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:30Z","lastTransitionTime":"2026-01-22T05:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.602776 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.602852 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.602875 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.602908 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.602931 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:30Z","lastTransitionTime":"2026-01-22T05:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.706736 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.706810 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.706826 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.706852 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.706868 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:30Z","lastTransitionTime":"2026-01-22T05:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.809812 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.809904 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.809926 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.809959 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.809983 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:30Z","lastTransitionTime":"2026-01-22T05:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.912761 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.912820 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.912838 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.912861 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:30 crc kubenswrapper[4814]: I0122 05:19:30.912879 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:30Z","lastTransitionTime":"2026-01-22T05:19:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.015442 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.015502 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.015520 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.015546 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.015566 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:31Z","lastTransitionTime":"2026-01-22T05:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.118696 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.118789 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.118804 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.118822 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.118836 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:31Z","lastTransitionTime":"2026-01-22T05:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.221716 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.221785 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.221810 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.221839 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.221861 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:31Z","lastTransitionTime":"2026-01-22T05:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.305968 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 17:57:33.830250748 +0000 UTC
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.324484 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.324548 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.324566 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.324591 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.324609 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:31Z","lastTransitionTime":"2026-01-22T05:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.342857 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.342963 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2"
Jan 22 05:19:31 crc kubenswrapper[4814]: E0122 05:19:31.343032 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.343058 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:19:31 crc kubenswrapper[4814]: E0122 05:19:31.343235 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873"
Jan 22 05:19:31 crc kubenswrapper[4814]: E0122 05:19:31.343565 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.428216 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.428272 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.428291 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.428316 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.428333 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:31Z","lastTransitionTime":"2026-01-22T05:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.531451 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.531525 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.531549 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.531572 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.531591 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:31Z","lastTransitionTime":"2026-01-22T05:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.634246 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.634280 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.634289 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.634304 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.634313 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:31Z","lastTransitionTime":"2026-01-22T05:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.737460 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.737519 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.737537 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.737562 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.737580 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:31Z","lastTransitionTime":"2026-01-22T05:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.840447 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.840544 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.840561 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.840586 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.840609 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:31Z","lastTransitionTime":"2026-01-22T05:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.943667 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.943720 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.943737 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.943763 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:31 crc kubenswrapper[4814]: I0122 05:19:31.943782 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:31Z","lastTransitionTime":"2026-01-22T05:19:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.046191 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.046245 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.046262 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.046285 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.046302 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:32Z","lastTransitionTime":"2026-01-22T05:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.149712 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.149788 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.149813 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.149845 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.149868 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:32Z","lastTransitionTime":"2026-01-22T05:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.252999 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.253069 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.253088 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.253113 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.253131 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:32Z","lastTransitionTime":"2026-01-22T05:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.306303 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 06:27:54.944182527 +0000 UTC
Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.343076 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.356358 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.356416 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.356435 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.356462 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.356480 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:32Z","lastTransitionTime":"2026-01-22T05:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.459775 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.459843 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.459860 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.459886 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.459904 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:32Z","lastTransitionTime":"2026-01-22T05:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.562347 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.562412 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.562428 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.562453 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.562471 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:32Z","lastTransitionTime":"2026-01-22T05:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.665196 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.665287 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.665307 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.665333 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.665351 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:32Z","lastTransitionTime":"2026-01-22T05:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.768147 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.768197 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.768213 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.768239 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.768256 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:32Z","lastTransitionTime":"2026-01-22T05:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.871228 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.871278 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.871295 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.871318 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.871334 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:32Z","lastTransitionTime":"2026-01-22T05:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.974419 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.974478 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.974496 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.974522 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:32 crc kubenswrapper[4814]: I0122 05:19:32.974541 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:32Z","lastTransitionTime":"2026-01-22T05:19:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.077797 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.077860 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.077884 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.077914 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.077934 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:33Z","lastTransitionTime":"2026-01-22T05:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.077934 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:33Z","lastTransitionTime":"2026-01-22T05:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.322348 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 11:51:47.229807865 +0000 UTC
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.324336 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.324376 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.324409 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.324428 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.324440 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:33Z","lastTransitionTime":"2026-01-22T05:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.343044 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.343118 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.343131 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:19:33 crc kubenswrapper[4814]: E0122 05:19:33.343270 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873"
Jan 22 05:19:33 crc kubenswrapper[4814]: E0122 05:19:33.343397 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:19:33 crc kubenswrapper[4814]: E0122 05:19:33.343546 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.427146 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.427215 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.427234 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.427266 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.427288 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:33Z","lastTransitionTime":"2026-01-22T05:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.530735 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.530841 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.530866 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.530895 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.530916 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:33Z","lastTransitionTime":"2026-01-22T05:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.634231 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.634310 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.634333 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.634367 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.634389 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:33Z","lastTransitionTime":"2026-01-22T05:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.738015 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.738109 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.738136 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.738167 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.738192 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:33Z","lastTransitionTime":"2026-01-22T05:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.841901 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.841969 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.841987 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.842014 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.842035 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:33Z","lastTransitionTime":"2026-01-22T05:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.945263 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.945329 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.945347 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.945376 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:33 crc kubenswrapper[4814]: I0122 05:19:33.945395 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:33Z","lastTransitionTime":"2026-01-22T05:19:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.048310 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.048374 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.048392 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.048416 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.048433 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:34Z","lastTransitionTime":"2026-01-22T05:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.151685 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.151750 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.151767 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.151799 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.151824 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:34Z","lastTransitionTime":"2026-01-22T05:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.255273 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.255370 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.255409 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.255442 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.255465 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:34Z","lastTransitionTime":"2026-01-22T05:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.323110 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 11:04:15.455888195 +0000 UTC
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.343075 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.344300 4814 scope.go:117] "RemoveContainer" containerID="fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b"
Jan 22 05:19:34 crc kubenswrapper[4814]: E0122 05:19:34.344575 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626"
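The "back-off 20s" in the CrashLoopBackOff error above follows the kubelet's crash-loop restart schedule: the delay starts small and doubles after each failed restart up to a cap, so 20s corresponds to an early retry. A sketch under assumed constants (10s base, 5m cap, as commonly documented; not read from this node's configuration):

    package main

    import (
        "fmt"
        "time"
    )

    // crashLoopDelay models a doubling restart backoff with a ceiling,
    // the shape behind "back-off 20s restarting failed container".
    func crashLoopDelay(restarts int) time.Duration {
        delay := 10 * time.Second // assumed base delay
        for i := 0; i < restarts; i++ {
            delay *= 2
            if delay > 5*time.Minute { // assumed cap
                return 5 * time.Minute
            }
        }
        return delay
    }

    func main() {
        for r := 0; r <= 5; r++ {
            fmt.Printf("restart %d -> back-off %s\n", r, crashLoopDelay(r))
        }
    }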
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.373943 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.374324 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.374493 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.374688 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.374848 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:34Z","lastTransitionTime":"2026-01-22T05:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.393206 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"
quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information 
is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.428810 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.443906 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.453514 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.461401 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.473707 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.477172 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.477223 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.477239 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.477261 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.477275 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:34Z","lastTransitionTime":"2026-01-22T05:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.489430 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c0
8d0d4caa4fc4d78c09330c8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:19Z\\\",\\\"message\\\":\\\" 6343 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0122 05:19:19.547708 6343 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-nmwv2] creating logical port openshift-multus_network-metrics-daemon-nmwv2 for pod on switch crc\\\\nI0122 05:19:19.547710 6343 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0122 05:19:19.547721 6343 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0122 05:19:19.547731 6343 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI0122 05:19:19.547691 6343 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.499855 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.514070 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.527080 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.538290 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-l
ib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.552351 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9aa37e0c-14c8-46c7-8def-981092890be4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aba4edaa606d76148f3ab38f35ea09d933a2941ffbc1137e99a7a74f18aa43ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c4b1170828d5d0e88d3e60432cbf3887c2821eaff1c1540e7e5765d44417c73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5fd93f04efc6d4ceb9a6483fe717567857eafa29470e1ca524baa91371d879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.569430 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.580293 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.580361 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.580381 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.580407 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.580423 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:34Z","lastTransitionTime":"2026-01-22T05:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.590310 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.604819 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.618429 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.630326 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"r
eady\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:34Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.682542 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.682605 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.682622 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.682677 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.682695 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:34Z","lastTransitionTime":"2026-01-22T05:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.785159 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.785208 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.785223 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.785243 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.785257 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:34Z","lastTransitionTime":"2026-01-22T05:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.888009 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.888187 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.888214 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.888247 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.888274 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:34Z","lastTransitionTime":"2026-01-22T05:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.991820 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.991880 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.991901 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.991925 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:34 crc kubenswrapper[4814]: I0122 05:19:34.991943 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:34Z","lastTransitionTime":"2026-01-22T05:19:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.094893 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.094970 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.094996 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.095023 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.095039 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:35Z","lastTransitionTime":"2026-01-22T05:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.198936 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.199806 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.200017 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.200214 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.200375 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:35Z","lastTransitionTime":"2026-01-22T05:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.305937 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.306326 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.306508 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.306701 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.306849 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:35Z","lastTransitionTime":"2026-01-22T05:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.323682 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 14:31:31.472372622 +0000 UTC Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.342971 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.343020 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.342987 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:35 crc kubenswrapper[4814]: E0122 05:19:35.343171 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:35 crc kubenswrapper[4814]: E0122 05:19:35.343289 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:35 crc kubenswrapper[4814]: E0122 05:19:35.343541 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.410026 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.410115 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.410135 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.410192 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.410211 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:35Z","lastTransitionTime":"2026-01-22T05:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.514188 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.514314 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.514343 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.514383 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.514409 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:35Z","lastTransitionTime":"2026-01-22T05:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.617596 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.617717 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.617740 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.617772 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.617794 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:35Z","lastTransitionTime":"2026-01-22T05:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.720993 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.721060 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.721077 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.721103 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.721120 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:35Z","lastTransitionTime":"2026-01-22T05:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.824149 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.824204 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.824220 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.824249 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.824269 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:35Z","lastTransitionTime":"2026-01-22T05:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.928012 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.928067 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.928084 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.928110 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:35 crc kubenswrapper[4814]: I0122 05:19:35.928126 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:35Z","lastTransitionTime":"2026-01-22T05:19:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.030995 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.031329 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.031421 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.031494 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.031563 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:36Z","lastTransitionTime":"2026-01-22T05:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.134796 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.134846 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.134865 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.134889 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.134906 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:36Z","lastTransitionTime":"2026-01-22T05:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.238762 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.238858 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.238886 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.238919 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.238951 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:36Z","lastTransitionTime":"2026-01-22T05:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.324843 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 06:32:19.738789633 +0000 UTC Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.342749 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:36 crc kubenswrapper[4814]: E0122 05:19:36.342924 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.342754 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.343265 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.343423 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.343590 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.343924 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:36Z","lastTransitionTime":"2026-01-22T05:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.447219 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.447264 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.447283 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.447305 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.447323 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:36Z","lastTransitionTime":"2026-01-22T05:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.550220 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.550670 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.550856 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.551010 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.551159 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:36Z","lastTransitionTime":"2026-01-22T05:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.654373 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.654444 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.654463 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.654489 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.654508 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:36Z","lastTransitionTime":"2026-01-22T05:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.758205 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.758282 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.758301 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.758330 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.758356 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:36Z","lastTransitionTime":"2026-01-22T05:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.861451 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.861540 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.861566 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.861598 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.861621 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:36Z","lastTransitionTime":"2026-01-22T05:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.964948 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.965001 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.965093 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.965121 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:36 crc kubenswrapper[4814]: I0122 05:19:36.965389 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:36Z","lastTransitionTime":"2026-01-22T05:19:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.068130 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.068163 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.068173 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.068188 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.068200 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:37Z","lastTransitionTime":"2026-01-22T05:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.202370 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.202432 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.202448 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.202471 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.202486 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:37Z","lastTransitionTime":"2026-01-22T05:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.304885 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.304982 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.305011 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.305043 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.305068 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:37Z","lastTransitionTime":"2026-01-22T05:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.325397 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 06:10:15.885485148 +0000 UTC Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.342943 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.342998 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:37 crc kubenswrapper[4814]: E0122 05:19:37.343072 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.343106 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:37 crc kubenswrapper[4814]: E0122 05:19:37.343275 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:37 crc kubenswrapper[4814]: E0122 05:19:37.343395 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.408298 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.408340 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.408349 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.408366 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.408376 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:37Z","lastTransitionTime":"2026-01-22T05:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.511247 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.511285 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.511294 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.511308 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.511319 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:37Z","lastTransitionTime":"2026-01-22T05:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.614571 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.614611 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.614646 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.614664 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.614672 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:37Z","lastTransitionTime":"2026-01-22T05:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.718707 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.718784 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.718808 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.718839 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.718861 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:37Z","lastTransitionTime":"2026-01-22T05:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.821576 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.821640 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.821651 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.821665 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.821673 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:37Z","lastTransitionTime":"2026-01-22T05:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.924117 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.924167 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.924177 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.924220 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:37 crc kubenswrapper[4814]: I0122 05:19:37.924231 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:37Z","lastTransitionTime":"2026-01-22T05:19:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.026938 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.026984 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.027011 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.027027 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.027038 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.129462 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.129520 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.129532 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.129549 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.129558 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.231954 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.231992 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.232001 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.232015 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.232025 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.326373 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 23:53:19.740884156 +0000 UTC Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.334599 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.334667 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.334681 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.334699 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.334711 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.343340 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:38 crc kubenswrapper[4814]: E0122 05:19:38.343516 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.437252 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.437292 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.437301 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.437317 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.437328 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.540120 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.540175 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.540203 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.540227 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.540245 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.620670 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.620736 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.620758 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.620789 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.620815 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: E0122 05:19:38.638903 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:38Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.642411 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.642456 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.642475 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.642499 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.642520 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: E0122 05:19:38.671590 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:38Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.678221 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.678287 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.678306 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.678329 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.678346 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: E0122 05:19:38.696576 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:38Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.701091 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.701239 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.701264 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.701295 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.701319 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: E0122 05:19:38.715989 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:38Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.722791 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.722825 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.722837 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.722855 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.722868 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: E0122 05:19:38.742615 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:38Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:38 crc kubenswrapper[4814]: E0122 05:19:38.742907 4814 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.744952 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.744991 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.745008 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.745030 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.745047 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.848152 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.848203 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.848224 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.848246 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.848263 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.950918 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.950965 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.950981 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.951003 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:38 crc kubenswrapper[4814]: I0122 05:19:38.951020 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:38Z","lastTransitionTime":"2026-01-22T05:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.053101 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.053136 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.053144 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.053160 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.053170 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:39Z","lastTransitionTime":"2026-01-22T05:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.155660 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.155692 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.155703 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.155717 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.155727 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:39Z","lastTransitionTime":"2026-01-22T05:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.258521 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.258550 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.258558 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.258572 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.258581 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:39Z","lastTransitionTime":"2026-01-22T05:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.326668 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 21:41:40.638135829 +0000 UTC Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.343557 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:39 crc kubenswrapper[4814]: E0122 05:19:39.343678 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.343731 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:39 crc kubenswrapper[4814]: E0122 05:19:39.343772 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.343806 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:39 crc kubenswrapper[4814]: E0122 05:19:39.343843 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.360073 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.360110 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.360119 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.360134 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.360144 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:39Z","lastTransitionTime":"2026-01-22T05:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.462319 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.462380 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.462398 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.462422 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.462439 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:39Z","lastTransitionTime":"2026-01-22T05:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.565100 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.565156 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.565168 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.565189 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.565202 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:39Z","lastTransitionTime":"2026-01-22T05:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.667918 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.667956 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.667971 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.667993 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.668009 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:39Z","lastTransitionTime":"2026-01-22T05:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.769821 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.769863 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.769881 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.769903 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.769920 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:39Z","lastTransitionTime":"2026-01-22T05:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.872116 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.872174 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.872192 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.872216 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.872233 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:39Z","lastTransitionTime":"2026-01-22T05:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.975166 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.975256 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.975275 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.975302 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:39 crc kubenswrapper[4814]: I0122 05:19:39.975319 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:39Z","lastTransitionTime":"2026-01-22T05:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.078348 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.078430 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.078452 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.078480 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.078497 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:40Z","lastTransitionTime":"2026-01-22T05:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.181597 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.181675 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.181692 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.181717 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.181736 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:40Z","lastTransitionTime":"2026-01-22T05:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.283810 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.283873 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.283890 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.283915 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.283933 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:40Z","lastTransitionTime":"2026-01-22T05:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.326945 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 16:06:58.775350921 +0000 UTC Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.343429 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:40 crc kubenswrapper[4814]: E0122 05:19:40.343710 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.388496 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.388551 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.388564 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.388586 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.388604 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:40Z","lastTransitionTime":"2026-01-22T05:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.490777 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.490828 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.490837 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.490852 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.490862 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:40Z","lastTransitionTime":"2026-01-22T05:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.593126 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.593342 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.593364 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.593376 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.593385 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:40Z","lastTransitionTime":"2026-01-22T05:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.695545 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.695575 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.695583 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.695596 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.695605 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:40Z","lastTransitionTime":"2026-01-22T05:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.797778 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.797811 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.797819 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.797831 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.797840 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:40Z","lastTransitionTime":"2026-01-22T05:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.900478 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.900522 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.900533 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.900552 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:40 crc kubenswrapper[4814]: I0122 05:19:40.900564 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:40Z","lastTransitionTime":"2026-01-22T05:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.003014 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.003399 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.003413 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.003587 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.003607 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:41Z","lastTransitionTime":"2026-01-22T05:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.105864 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.105900 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.105910 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.105924 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.105935 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:41Z","lastTransitionTime":"2026-01-22T05:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.208387 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.208440 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.208448 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.208463 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.208472 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:41Z","lastTransitionTime":"2026-01-22T05:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.269254 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:41 crc kubenswrapper[4814]: E0122 05:19:41.269461 4814 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:19:41 crc kubenswrapper[4814]: E0122 05:19:41.269687 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs podName:33d4bb42-6c3b-4a42-bf7b-bb9a780f7873 nodeName:}" failed. No retries permitted until 2026-01-22 05:20:13.269658058 +0000 UTC m=+99.353146313 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs") pod "network-metrics-daemon-nmwv2" (UID: "33d4bb42-6c3b-4a42-bf7b-bb9a780f7873") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.310524 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.310566 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.310583 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.310606 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.310673 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:41Z","lastTransitionTime":"2026-01-22T05:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.327806 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 09:33:35.4314131 +0000 UTC Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.343156 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:41 crc kubenswrapper[4814]: E0122 05:19:41.343314 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.343213 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:41 crc kubenswrapper[4814]: E0122 05:19:41.343504 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.343211 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:41 crc kubenswrapper[4814]: E0122 05:19:41.343699 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.413316 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.413572 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.413765 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.413903 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.414040 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:41Z","lastTransitionTime":"2026-01-22T05:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.519377 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.519412 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.519424 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.519443 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.519457 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:41Z","lastTransitionTime":"2026-01-22T05:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.621945 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.621977 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.621985 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.622001 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.622012 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:41Z","lastTransitionTime":"2026-01-22T05:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.725022 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.725082 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.725096 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.725115 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.725128 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:41Z","lastTransitionTime":"2026-01-22T05:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.818065 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rq55l_22017d22-7b4d-4e3d-bbae-ff564c64bd7b/kube-multus/0.log" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.818142 4814 generic.go:334] "Generic (PLEG): container finished" podID="22017d22-7b4d-4e3d-bbae-ff564c64bd7b" containerID="1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc" exitCode=1 Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.818185 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rq55l" event={"ID":"22017d22-7b4d-4e3d-bbae-ff564c64bd7b","Type":"ContainerDied","Data":"1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc"} Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.818770 4814 scope.go:117] "RemoveContainer" containerID="1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.827177 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.827218 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.827229 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.827244 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.827254 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:41Z","lastTransitionTime":"2026-01-22T05:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.833760 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.845471 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.860950 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.880596 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:19Z\\\",\\\"message\\\":\\\" 6343 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0122 05:19:19.547708 6343 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-nmwv2] creating logical port openshift-multus_network-metrics-daemon-nmwv2 for pod on switch crc\\\\nI0122 05:19:19.547710 6343 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0122 05:19:19.547721 6343 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0122 05:19:19.547731 6343 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI0122 05:19:19.547691 6343 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.891609 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.904920 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:41Z\\\",\\\"message\\\":\\\"2026-01-22T05:18:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94\\\\n2026-01-22T05:18:56+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94 to /host/opt/cni/bin/\\\\n2026-01-22T05:18:56Z [verbose] multus-daemon started\\\\n2026-01-22T05:18:56Z [verbose] Readiness Indicator file check\\\\n2026-01-22T05:19:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.915754 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9aa37e0c-14c8-46c7-8def-981092890be4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aba4edaa606d76148f3ab38f35ea09d933a2941ffbc1137e99a7a74f18aa43ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c4b1170828d5d0e88d3e60432cbf3887c2821eaff1c1540e7e5765d44417c73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5fd93f04efc6d4ceb9a6483fe717567857eafa29470e1ca524baa91371d879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"r
estartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.928580 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.929545 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.929570 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.929579 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.929593 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.929603 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:41Z","lastTransitionTime":"2026-01-22T05:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.942131 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.956614 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.968332 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.981351 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:41 crc kubenswrapper[4814]: I0122 05:19:41.992230 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:41Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.002618 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7
73257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.015078 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluste
r-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 
05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.030673 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.031435 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.031462 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.031470 4814 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.031482 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.031491 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:42Z","lastTransitionTime":"2026-01-22T05:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.044907 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.133402 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:42 
crc kubenswrapper[4814]: I0122 05:19:42.133427 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.133435 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.133448 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.133457 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:42Z","lastTransitionTime":"2026-01-22T05:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.235529 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.235577 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.235586 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.235604 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.235616 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:42Z","lastTransitionTime":"2026-01-22T05:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.328409 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 10:50:05.938578812 +0000 UTC Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.338273 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.338307 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.338319 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.338340 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.338353 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:42Z","lastTransitionTime":"2026-01-22T05:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.343567 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:42 crc kubenswrapper[4814]: E0122 05:19:42.343683 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.440971 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.441017 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.441028 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.441046 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.441057 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:42Z","lastTransitionTime":"2026-01-22T05:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.543009 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.543095 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.543119 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.543144 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.543161 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:42Z","lastTransitionTime":"2026-01-22T05:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.646148 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.646200 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.646208 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.646225 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.646235 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:42Z","lastTransitionTime":"2026-01-22T05:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.749407 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.749457 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.749465 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.749487 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.749497 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:42Z","lastTransitionTime":"2026-01-22T05:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.822346 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rq55l_22017d22-7b4d-4e3d-bbae-ff564c64bd7b/kube-multus/0.log" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.822395 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rq55l" event={"ID":"22017d22-7b4d-4e3d-bbae-ff564c64bd7b","Type":"ContainerStarted","Data":"f22998162f0ec0e1506bc8201a3ed88f8dc47ae492f47e09ef1ce1ecaf1ed181"} Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.845233 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c0
8d0d4caa4fc4d78c09330c8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:19Z\\\",\\\"message\\\":\\\" 6343 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0122 05:19:19.547708 6343 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-nmwv2] creating logical port openshift-multus_network-metrics-daemon-nmwv2 for pod on switch crc\\\\nI0122 05:19:19.547710 6343 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0122 05:19:19.547721 6343 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0122 05:19:19.547731 6343 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI0122 05:19:19.547691 6343 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.852989 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.853047 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.853067 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.853091 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.853109 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:42Z","lastTransitionTime":"2026-01-22T05:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.857211 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.867889 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.877716 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.890770 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.903869 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.915895 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.932879 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.944400 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.955394 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.955422 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.955430 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.955444 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.955453 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:42Z","lastTransitionTime":"2026-01-22T05:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.959448 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f22998162f0ec0e1506bc8201a3ed88f8dc47ae492f47e09ef1ce1ecaf1ed181\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:41Z\\\",\\\"message\\\":\\\"2026-01-22T05:18:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94\\\\n2026-01-22T05:18:56+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94 to /host/opt/cni/bin/\\\\n2026-01-22T05:18:56Z [verbose] multus-daemon started\\\\n2026-01-22T05:18:56Z [verbose] Readiness Indicator file check\\\\n2026-01-22T05:19:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.971379 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9aa37e0c-14c8-46c7-8def-981092890be4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aba4edaa606d76148f3ab38f35ea09d933a2941ffbc1137e99a7a74f18aa43ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c4b1170828d5d0e88d3e60432cbf3887c2821eaff1c1540e7e5765d44417c73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5fd93f04efc6d4ceb9a6483fe717567857eafa29470e1ca524baa91371d879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.983616 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:42 crc kubenswrapper[4814]: I0122 05:19:42.996701 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:42Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.008978 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:43Z is after 2025-08-24T17:21:41Z" Jan 22 
05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.020373 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:43Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.038773 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:43Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.058670 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.058704 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.058716 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.058732 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.058745 4814 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:43Z","lastTransitionTime":"2026-01-22T05:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.058694 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:43Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.161488 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.161544 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.161562 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.161586 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.161609 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:43Z","lastTransitionTime":"2026-01-22T05:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.264610 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.264682 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.264701 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.264724 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.264740 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:43Z","lastTransitionTime":"2026-01-22T05:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.328581 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 12:57:32.04382006 +0000 UTC Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.342865 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.342882 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.342938 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:43 crc kubenswrapper[4814]: E0122 05:19:43.342996 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:43 crc kubenswrapper[4814]: E0122 05:19:43.343098 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:43 crc kubenswrapper[4814]: E0122 05:19:43.343193 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.367151 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.367195 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.367207 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.367228 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.367242 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:43Z","lastTransitionTime":"2026-01-22T05:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.469584 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.469612 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.469620 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.469645 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.469654 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:43Z","lastTransitionTime":"2026-01-22T05:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.571944 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.571973 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.571983 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.571997 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.572008 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:43Z","lastTransitionTime":"2026-01-22T05:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.676950 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.676986 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.677012 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.677026 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.677036 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:43Z","lastTransitionTime":"2026-01-22T05:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.779165 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.779220 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.779233 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.779250 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.779262 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:43Z","lastTransitionTime":"2026-01-22T05:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.881490 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.881587 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.881610 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.881715 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.881763 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:43Z","lastTransitionTime":"2026-01-22T05:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.984112 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.984183 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.984202 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.984227 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:43 crc kubenswrapper[4814]: I0122 05:19:43.984246 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:43Z","lastTransitionTime":"2026-01-22T05:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.086160 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.086232 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.086244 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.086262 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.086275 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:44Z","lastTransitionTime":"2026-01-22T05:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.188417 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.188452 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.188461 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.188476 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.188486 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:44Z","lastTransitionTime":"2026-01-22T05:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.290753 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.290809 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.290828 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.290855 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.290871 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:44Z","lastTransitionTime":"2026-01-22T05:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.329658 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 14:38:45.976295562 +0000 UTC Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.343346 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:44 crc kubenswrapper[4814]: E0122 05:19:44.343460 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.361003 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.375686 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.386039 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.393274 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.393307 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.393315 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.393330 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.393340 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:44Z","lastTransitionTime":"2026-01-22T05:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.402168 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f22998162f0ec0e1506bc8201a3ed88f8dc47ae492f47e09ef1ce1ecaf1ed181\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:41Z\\\",\\\"message\\\":\\\"2026-01-22T05:18:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94\\\\n2026-01-22T05:18:56+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94 to /host/opt/cni/bin/\\\\n2026-01-22T05:18:56Z [verbose] multus-daemon started\\\\n2026-01-22T05:18:56Z [verbose] Readiness Indicator file check\\\\n2026-01-22T05:19:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.418324 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9aa37e0c-14c8-46c7-8def-981092890be4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aba4edaa606d76148f3ab38f35ea09d933a2941ffbc1137e99a7a74f18aa43ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c4b1170828d5d0e88d3e60432cbf3887c2821eaff1c1540e7e5765d44417c73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5fd93f04efc6d4ceb9a6483fe717567857eafa29470e1ca524baa91371d879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.432704 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.448305 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.461552 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.472826 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 
05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.490361 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.496375 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.496436 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.496453 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.496474 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.496493 4814 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:44Z","lastTransitionTime":"2026-01-22T05:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.504527 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.515000 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.526231 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.539419 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.548317 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.562926 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.583250 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:19Z\\\",\\\"message\\\":\\\" 6343 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0122 05:19:19.547708 6343 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-nmwv2] creating logical port openshift-multus_network-metrics-daemon-nmwv2 for pod on switch crc\\\\nI0122 05:19:19.547710 6343 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0122 05:19:19.547721 6343 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0122 05:19:19.547731 6343 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI0122 05:19:19.547691 6343 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:44Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.599493 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.599570 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.599592 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.599655 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.599685 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:44Z","lastTransitionTime":"2026-01-22T05:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.701860 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.701897 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.701906 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.701921 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.701930 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:44Z","lastTransitionTime":"2026-01-22T05:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.805940 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.805977 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.805986 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.806001 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.806010 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:44Z","lastTransitionTime":"2026-01-22T05:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.908104 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.908136 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.908145 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.908176 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:44 crc kubenswrapper[4814]: I0122 05:19:44.908186 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:44Z","lastTransitionTime":"2026-01-22T05:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.010761 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.010787 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.010796 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.010808 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.010816 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:45Z","lastTransitionTime":"2026-01-22T05:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.113691 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.113749 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.113770 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.113800 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.113822 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:45Z","lastTransitionTime":"2026-01-22T05:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.216587 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.216665 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.216684 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.216707 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.216725 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:45Z","lastTransitionTime":"2026-01-22T05:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.318937 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.319001 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.319019 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.319044 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.319062 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:45Z","lastTransitionTime":"2026-01-22T05:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.330337 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 04:18:43.604691494 +0000 UTC Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.342717 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.342824 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:45 crc kubenswrapper[4814]: E0122 05:19:45.342910 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.342950 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:45 crc kubenswrapper[4814]: E0122 05:19:45.343183 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:45 crc kubenswrapper[4814]: E0122 05:19:45.343384 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.343523 4814 scope.go:117] "RemoveContainer" containerID="fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.354903 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.421541 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.421577 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.421591 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.421609 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.421622 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:45Z","lastTransitionTime":"2026-01-22T05:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.523165 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.523202 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.523212 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.523229 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.523241 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:45Z","lastTransitionTime":"2026-01-22T05:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.624938 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.624973 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.624983 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.624998 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.625008 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:45Z","lastTransitionTime":"2026-01-22T05:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.726920 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.726966 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.726977 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.726996 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.727009 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:45Z","lastTransitionTime":"2026-01-22T05:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.829044 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.829105 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.829118 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.829137 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.829149 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:45Z","lastTransitionTime":"2026-01-22T05:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.851017 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/2.log" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.854114 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerStarted","Data":"4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb"} Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.854758 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.866171 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.879805 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.888616 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.903394 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.920169 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:19Z\\\",\\\"message\\\":\\\" 6343 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0122 05:19:19.547708 6343 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-nmwv2] creating logical port openshift-multus_network-metrics-daemon-nmwv2 for pod on switch crc\\\\nI0122 05:19:19.547710 6343 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0122 05:19:19.547721 6343 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0122 05:19:19.547731 6343 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI0122 05:19:19.547691 6343 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: 
UUIDName:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.930404 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.931715 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.931748 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.931759 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.931781 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.931792 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:45Z","lastTransitionTime":"2026-01-22T05:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.943460 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.964025 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.975646 4814 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f22998162f0ec0e1506bc8201a3ed88f8dc47ae492f47e09ef1ce1ecaf1ed181\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:41Z\\\",\\\"message\\\":\\\"2026-01-22T05:18:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94\\\\n2026-01-22T05:18:56+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94 to /host/opt/cni/bin/\\\\n2026-01-22T05:18:56Z [verbose] multus-daemon started\\\\n2026-01-22T05:18:56Z [verbose] Readiness Indicator file check\\\\n2026-01-22T05:19:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.987226 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9aa37e0c-14c8-46c7-8def-981092890be4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aba4edaa606d76148f3ab38f35ea09d933a2941ffbc1137e99a7a74f18aa43ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c4b1170828d5d0e88d3e60432cbf3887c2821eaff1c1540e7e5765d44417c73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5fd93f04efc6d4ceb9a6483fe717567857eafa29470e1ca524baa91371d879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:45 crc kubenswrapper[4814]: I0122 05:19:45.996936 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:45Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.007050 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.017538 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.026516 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 
05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.033660 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.033697 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.033707 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.033721 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.033732 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:46Z","lastTransitionTime":"2026-01-22T05:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.037946 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc3683a5-0a9d-4263-adf7-beb44a5abb94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cff7a5a68e2e3e125fb9198ce7dcf1b2c7470941e9bfe4206439e2c21f409f4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800b42dbf9d307e5c329fc25d90f26d545fa25f0774fc8949eafb714343355c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08a
af09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://800b42dbf9d307e5c329fc25d90f26d545fa25f0774fc8949eafb714343355c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.051520 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.063924 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.073070 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.135924 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.135971 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.135984 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.136006 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.136019 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:46Z","lastTransitionTime":"2026-01-22T05:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.242764 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.242807 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.242818 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.242833 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.242843 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:46Z","lastTransitionTime":"2026-01-22T05:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.331050 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 19:22:57.576742432 +0000 UTC Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.343440 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:46 crc kubenswrapper[4814]: E0122 05:19:46.343589 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.344921 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.344957 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.344966 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.344980 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.344989 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:46Z","lastTransitionTime":"2026-01-22T05:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.447732 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.447760 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.447767 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.447779 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.447789 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:46Z","lastTransitionTime":"2026-01-22T05:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.550420 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.550459 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.550467 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.550485 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.550496 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:46Z","lastTransitionTime":"2026-01-22T05:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.652278 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.652312 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.652320 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.652334 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.652345 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:46Z","lastTransitionTime":"2026-01-22T05:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.754998 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.755029 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.755038 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.755072 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.755080 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:46Z","lastTransitionTime":"2026-01-22T05:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.856606 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.856663 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.856672 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.856687 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.856697 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:46Z","lastTransitionTime":"2026-01-22T05:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.859343 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/3.log" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.860986 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/2.log" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.864715 4814 generic.go:334] "Generic (PLEG): container finished" podID="55649399-9fd6-4e9a-b249-ce01b498c626" containerID="4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb" exitCode=1 Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.864786 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb"} Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.864836 4814 scope.go:117] "RemoveContainer" containerID="fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.865252 4814 scope.go:117] "RemoveContainer" containerID="4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb" Jan 22 05:19:46 crc kubenswrapper[4814]: E0122 05:19:46.865379 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.876848 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.892750 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.905709 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc3683a5-0a9d-4263-adf7-beb44a5abb94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cff7a5a68e2e3e125fb9198ce7dcf1b2c7470941e9bfe4206439e2c21f409f4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800b42dbf9d307e5c329fc25d90f26d545fa25f0774fc8949eafb714343355c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://800b42dbf9d307e5c329fc25d90f26d545fa25f0774fc8949eafb714343355c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.921366 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.933554 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.956261 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fadd742e3b1d127e42ec5931356752af3b52a2c08d0d4caa4fc4d78c09330c8b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:19Z\\\",\\\"message\\\":\\\" 6343 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0122 05:19:19.547708 6343 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-nmwv2] creating logical port openshift-multus_network-metrics-daemon-nmwv2 for pod on switch crc\\\\nI0122 05:19:19.547710 6343 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI0122 05:19:19.547721 6343 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0122 05:19:19.547731 6343 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI0122 05:19:19.547691 6343 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:18Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:46Z\\\",\\\"message\\\":\\\"22 05:19:46.176742 6699 obj_retry.go:303] Retry object setup: *v1.Pod 
openshift-machine-config-operator/kube-rbac-proxy-crio-crc\\\\nI0122 05:19:46.176751 6699 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/kube-rbac-proxy-crio-crc\\\\nF0122 05:19:46.176758 6699 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z]\\\\nI0122 05:19:46.176763 6699 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/kube-rbac-proxy-crio-crc in node crc\\\\nI0122 05:19:46.176769 6699 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operato\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.958504 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.958578 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.958602 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.958667 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.958693 4814 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:46Z","lastTransitionTime":"2026-01-22T05:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.971945 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:46 crc kubenswrapper[4814]: I0122 05:19:46.991296 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.002164 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.014655 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.030958 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.051961 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.061378 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.061428 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.061448 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.061473 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.061490 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:47Z","lastTransitionTime":"2026-01-22T05:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.070524 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.086915 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.104887 4814 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f22998162f0ec0e1506bc8201a3ed88f8dc47ae492f47e09ef1ce1ecaf1ed181\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:41Z\\\",\\\"message\\\":\\\"2026-01-22T05:18:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94\\\\n2026-01-22T05:18:56+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94 to /host/opt/cni/bin/\\\\n2026-01-22T05:18:56Z [verbose] multus-daemon started\\\\n2026-01-22T05:18:56Z [verbose] Readiness Indicator file check\\\\n2026-01-22T05:19:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.117985 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9aa37e0c-14c8-46c7-8def-981092890be4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aba4edaa606d76148f3ab38f35ea09d933a2941ffbc1137e99a7a74f18aa43ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c4b1170828d5d0e88d3e60432cbf3887c2821eaff1c1540e7e5765d44417c73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5fd93f04efc6d4ceb9a6483fe717567857eafa29470e1ca524baa91371d879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.139962 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.157788 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 
05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.163760 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.163814 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.163831 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.163856 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.163873 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:47Z","lastTransitionTime":"2026-01-22T05:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.291287 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.291319 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.291328 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.291343 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.291352 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:47Z","lastTransitionTime":"2026-01-22T05:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.332175 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 21:52:59.086172931 +0000 UTC
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.342886 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.342940 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.342980 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2"
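[Editor's note] Every "Failed to update status for pod" entry in this stretch reduces to one check: the webhook serving certificate's NotAfter (2025-08-24T17:21:41Z) is earlier than the node's clock (2026-01-22T05:19:47Z), so each TLS handshake to https://127.0.0.1:9743 fails before the status patch is sent, while certificate_manager.go separately reports a rotation deadline for the kubelet-serving cert. The Go sketch below only makes those two comparisons concrete; the file path and the 0.7-0.9 jitter fractions are assumptions of this sketch, not the kubelet's actual implementation.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"math/rand"
	"os"
	"time"
)

func main() {
	// Hypothetical cert path for illustration; the log does not name the file.
	data, err := os.ReadFile("/var/lib/kubelet/pki/kubelet-server-current.pem")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// The hard check behind the webhook failures: once the clock passes
	// NotAfter, verification fails with "certificate has expired or is not
	// yet valid: current time ... is after ...".
	if now := time.Now(); now.After(cert.NotAfter) {
		fmt.Printf("expired: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	}

	// A jittered rotation deadline late in the validity window, in the spirit
	// of the certificate_manager.go line above (fractions are assumed).
	validity := cert.NotAfter.Sub(cert.NotBefore)
	deadline := cert.NotBefore.Add(time.Duration((0.7 + 0.2*rand.Float64()) * float64(validity)))
	fmt.Printf("Certificate expiration is %s, rotation deadline is %s\n",
		cert.NotAfter.UTC(), deadline.UTC())
}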
Jan 22 05:19:47 crc kubenswrapper[4814]: E0122 05:19:47.343078 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:19:47 crc kubenswrapper[4814]: E0122 05:19:47.343161 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873"
Jan 22 05:19:47 crc kubenswrapper[4814]: E0122 05:19:47.343324 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.393891 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.393936 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.393953 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.393976 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.393993 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:47Z","lastTransitionTime":"2026-01-22T05:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.496890 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.496939 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.496956 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.496978 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
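[Editor's note] The "Error syncing pod" and "Node became not ready" records all hinge on the same predicate: the runtime reports NetworkReady=false until a CNI configuration file appears in /etc/kubernetes/cni/net.d/, which here only happens once the crash-looping ovnkube-controller writes its config. A minimal Go sketch of that directory probe; the extension list follows the CNI convention and is an assumption of this sketch rather than the kubelet's exact logic.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// networkConfigPresent reports whether any CNI network config exists in
// confDir, mirroring the condition behind "no CNI configuration file in
// /etc/kubernetes/cni/net.d/" in the entries above.
func networkConfigPresent(confDir string) bool {
	entries, err := os.ReadDir(confDir)
	if err != nil {
		return false // a missing or unreadable dir counts as "no config yet"
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // conventional CNI config extensions (assumed)
			return true
		}
	}
	return false
}

func main() {
	if !networkConfigPresent("/etc/kubernetes/cni/net.d") {
		fmt.Println("NetworkReady=false reason:NetworkPluginNotReady message:no CNI configuration file. Has your network provider started?")
	}
}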
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.496995 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:47Z","lastTransitionTime":"2026-01-22T05:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.599209 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.599266 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.599282 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.599308 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.599327 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:47Z","lastTransitionTime":"2026-01-22T05:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.701829 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.701885 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.701907 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.701934 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.701952 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:47Z","lastTransitionTime":"2026-01-22T05:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.809096 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.809215 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.809274 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.809301 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.809319 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:47Z","lastTransitionTime":"2026-01-22T05:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.870156 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/3.log" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.879153 4814 scope.go:117] "RemoveContainer" containerID="4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb" Jan 22 05:19:47 crc kubenswrapper[4814]: E0122 05:19:47.879295 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.892331 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc3683a5-0a9d-4263-adf7-beb44a5abb94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cff7a5a68e2e3e125fb9198ce7dcf1b2c7470941e9bfe4206439e2c21f409f4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800b42dbf9d307e5c329fc25d90f26d545fa25f0774fc8949eafb714343355c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://800b42dbf9d307e5c329fc25d90f26d545fa25f0774fc8949eafb714343355c8\\\",\\\"exitCode\\\":0,\
\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.914902 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.914942 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.914959 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.914981 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.914998 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:47Z","lastTransitionTime":"2026-01-22T05:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.920536 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.943807 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.967587 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.986055 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:47 crc kubenswrapper[4814]: I0122 05:19:47.995254 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:47Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.011386 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:48Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.016784 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.016834 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:48 crc 
kubenswrapper[4814]: I0122 05:19:48.016853 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.016878 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.016894 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:48Z","lastTransitionTime":"2026-01-22T05:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.031143 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f44817b252b8597c748b3dacbc3a6a35d29af42
72aee1199dd8441a9ac8c2eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:46Z\\\",\\\"message\\\":\\\"22 05:19:46.176742 6699 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-operator/kube-rbac-proxy-crio-crc\\\\nI0122 05:19:46.176751 6699 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/kube-rbac-proxy-crio-crc\\\\nF0122 05:19:46.176758 6699 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z]\\\\nI0122 05:19:46.176763 6699 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/kube-rbac-proxy-crio-crc in node crc\\\\nI0122 05:19:46.176769 6699 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operato\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:48Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.042478 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:48Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.056669 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:48Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.075147 4814 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f22998162f0ec0e1506bc8201a3ed88f8dc47ae492f47e09ef1ce1ecaf1ed181\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:41Z\\\",\\\"message\\\":\\\"2026-01-22T05:18:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94\\\\n2026-01-22T05:18:56+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94 to /host/opt/cni/bin/\\\\n2026-01-22T05:18:56Z [verbose] multus-daemon started\\\\n2026-01-22T05:18:56Z [verbose] Readiness Indicator file check\\\\n2026-01-22T05:19:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:48Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.088227 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9aa37e0c-14c8-46c7-8def-981092890be4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aba4edaa606d76148f3ab38f35ea09d933a2941ffbc1137e99a7a74f18aa43ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c4b1170828d5d0e88d3e60432cbf3887c2821eaff1c1540e7e5765d44417c73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5fd93f04efc6d4ceb9a6483fe717567857eafa29470e1ca524baa91371d879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:48Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.102576 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:48Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.117065 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:48Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.118751 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.118799 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.118819 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.118842 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.118859 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:48Z","lastTransitionTime":"2026-01-22T05:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.130097 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:48Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.145249 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:48Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.157967 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:48Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.171456 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:48Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.221806 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.221847 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.221857 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.221874 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.221884 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:48Z","lastTransitionTime":"2026-01-22T05:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.324144 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.324174 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.324182 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.324195 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.324205 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:48Z","lastTransitionTime":"2026-01-22T05:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.332581 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 02:10:03.613209233 +0000 UTC
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.343044 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:19:48 crc kubenswrapper[4814]: E0122 05:19:48.343224 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.426384 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.426421 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.426432 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.426446 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.426458 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:48Z","lastTransitionTime":"2026-01-22T05:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.529105 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.529161 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.529182 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.529209 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.529226 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:48Z","lastTransitionTime":"2026-01-22T05:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
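Every "failed calling webhook" entry in this log fails for the single reason spelled out in its error string: the node clock (2026-01-22) is past the webhook serving certificate's NotAfter date (2025-08-24T17:21:41Z), so TLS verification against https://127.0.0.1:9743 can never succeed until that certificate is rotated. The check itself is just a comparison of the current time against the certificate's validity window. The Go sketch below mirrors the window test that crypto/x509 enforces during chain verification; it is an illustration only, not kubelet's or the webhook's actual code, and reading the certificate path from argv is an assumption for the sake of a runnable example.

// Illustrative sketch: the validity-window check behind the recurring
// "x509: certificate has expired or is not yet valid" failures above.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

// checkValidity reports an error when now falls outside the certificate's
// [NotBefore, NotAfter] window, the same condition crypto/x509 rejects.
func checkValidity(certPEM []byte, now time.Time) error {
	block, _ := pem.Decode(certPEM)
	if block == nil {
		return fmt.Errorf("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return err
	}
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		// Same shape as the message logged by the failed webhook posts.
		return fmt.Errorf("x509: certificate has expired or is not yet valid: current time %s is after %s",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
	}
	return nil
}

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: checkcert <path-to-cert.pem>") // hypothetical usage
		os.Exit(2)
	}
	pemBytes, err := os.ReadFile(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if err := checkValidity(pemBytes, time.Now()); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("certificate is within its validity window")
}

Note also the certificate_manager.go:356 entry above: the kubelet-serving certificate's rotation deadline (2025-11-30) is itself already in the past relative to the log time, consistent with a cluster whose clock jumped far beyond the certificates' lifetimes.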
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.632061 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.632109 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.632119 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.632134 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.632144 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:48Z","lastTransitionTime":"2026-01-22T05:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.735398 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.735433 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.735442 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.735458 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.735469 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:48Z","lastTransitionTime":"2026-01-22T05:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.838266 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.838316 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.838326 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.838345 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.838362 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:48Z","lastTransitionTime":"2026-01-22T05:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
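The NodeNotReady churn repeating roughly every 100 ms in the entries above and below traces back to one runtime condition: NetworkReady=false because /etc/kubernetes/cni/net.d/ holds no CNI configuration, and kubelet copies the runtime's message verbatim into the node's Ready=False condition (the setters.go:603 entries). The Go sketch below is a minimal illustration of that gate under the assumption that the usual CNI config extensions (.conf, .conflist, .json) are what counts; it is not CRI-O's or kubelet's actual loader.

// Illustrative sketch: the empty-conf-dir condition that keeps the node
// NotReady in this log. Not the real CNI loader.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig reports whether any plausible CNI network config exists
// in dir (assumed extensions: .conf, .conflist, .json).
func hasCNIConfig(dir string) bool {
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, _ := filepath.Glob(filepath.Join(dir, pat))
		if len(matches) > 0 {
			return true
		}
	}
	return false
}

func main() {
	dir := "/etc/kubernetes/cni/net.d/"
	if hasCNIConfig(dir) {
		fmt.Println("NetworkReady=true")
		return
	}
	// Same shape as the runtime status kubelet keeps echoing above.
	fmt.Printf("container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in %s. Has your network provider started?\n", dir)
	os.Exit(1)
}

In this log the directory stays empty because the OVN-Kubernetes node components cannot come up: their status patches are themselves blocked by the expired network-node-identity webhook certificate, which is why the condition never clears.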
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.941592 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.941676 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.941689 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.941707 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:48 crc kubenswrapper[4814]: I0122 05:19:48.941718 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:48Z","lastTransitionTime":"2026-01-22T05:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.044697 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.044733 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.044742 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.044754 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.044764 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.080689 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.080719 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.080726 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.080739 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.080750 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:49 crc kubenswrapper[4814]: E0122 05:19:49.097958 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:49Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.103048 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.103079 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.103088 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.103102 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.103112 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:49 crc kubenswrapper[4814]: E0122 05:19:49.115392 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:49Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.119715 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.119790 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.119815 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.119845 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.119872 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:49 crc kubenswrapper[4814]: E0122 05:19:49.144068 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:49Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.148938 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.149014 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.149028 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.149047 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.149328 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:49 crc kubenswrapper[4814]: E0122 05:19:49.168859 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:49Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.174345 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.174408 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
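The err payload in each failed attempt is one escaped JSON document: node conditions, allocatable/capacity figures, and the node's full image list. A minimal Python sketch for making such a payload readable; the condition values below are copied from the log lines above, and the ~50-entry images array is elided:

import json

# Trimmed copy of the status patch the kubelet logs above (images elided;
# condition values are as logged).
patch = json.loads('''
{"status": {"conditions": [
  {"type": "MemoryPressure", "status": "False", "reason": "KubeletHasSufficientMemory"},
  {"type": "DiskPressure",   "status": "False", "reason": "KubeletHasNoDiskPressure"},
  {"type": "PIDPressure",    "status": "False", "reason": "KubeletHasSufficientPID"},
  {"type": "Ready",          "status": "False", "reason": "KubeletNotReady"}
]}}
''')

for cond in patch["status"]["conditions"]:
    print(f'{cond["type"]:>14}  status={cond["status"]}  reason={cond["reason"]}')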
event="NodeHasNoDiskPressure" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.174427 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.174452 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.174469 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:49 crc kubenswrapper[4814]: E0122 05:19:49.194939 4814 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9001f652-05f0-41c2-9b56-281608fe470d\\\",\\\"systemUUID\\\":\\\"aaa9af76-19bc-4fd1-8c88-46d65f8fe036\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:49Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:49 crc kubenswrapper[4814]: E0122 05:19:49.195202 4814 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.197507 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
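All three attempts, and the final give-up, share one root cause: the serving certificate behind the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 expired on 2025-08-24, months before these entries. A minimal Python sketch to confirm the certificate dates from the node; it assumes the third-party cryptography package is installed and that the listener completes a handshake with verification disabled (the endpoint is taken from the log):

import ssl
from cryptography import x509  # third-party package; assumed available

# Fetch the webhook's serving certificate without verifying it (a verifying
# handshake would abort on the expired cert before we could inspect it).
pem = ssl.get_server_certificate(("127.0.0.1", 9743))
cert = x509.load_pem_x509_certificate(pem.encode())

print("subject:   ", cert.subject.rfc4514_string())
print("not before:", cert.not_valid_before)
print("not after: ", cert.not_valid_after)  # the log says 2025-08-24T17:21:41Z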
event="NodeHasSufficientMemory" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.197580 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.197602 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.198605 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.198695 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.303026 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.303080 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.303099 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.303122 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.303140 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.333480 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 07:12:17.776144773 +0000 UTC Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.342893 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.342968 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.343014 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:49 crc kubenswrapper[4814]: E0122 05:19:49.343077 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:49 crc kubenswrapper[4814]: E0122 05:19:49.343182 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:49 crc kubenswrapper[4814]: E0122 05:19:49.343326 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.406076 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.406130 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.406148 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.406172 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.406189 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.508924 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.508987 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.509182 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.509207 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.509228 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.612620 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.612711 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.612734 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.612762 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.612781 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.715596 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.715691 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.715710 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.715733 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.715750 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.818967 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.819030 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.819046 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.819071 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.819088 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.922072 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.922134 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.922151 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.922176 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:49 crc kubenswrapper[4814]: I0122 05:19:49.922195 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:49Z","lastTransitionTime":"2026-01-22T05:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:50 crc kubenswrapper[4814]: I0122 05:19:50.025148 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:50 crc kubenswrapper[4814]: I0122 05:19:50.025216 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:50 crc kubenswrapper[4814]: I0122 05:19:50.025239 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:50 crc kubenswrapper[4814]: I0122 05:19:50.025269 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:50 crc kubenswrapper[4814]: I0122 05:19:50.025292 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:50Z","lastTransitionTime":"2026-01-22T05:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:50 crc kubenswrapper[4814]: I0122 05:19:50.129051 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:50 crc kubenswrapper[4814]: I0122 05:19:50.129101 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:50 crc kubenswrapper[4814]: I0122 05:19:50.129119 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:50 crc kubenswrapper[4814]: I0122 05:19:50.129143 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:50 crc kubenswrapper[4814]: I0122 05:19:50.129161 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:50Z","lastTransitionTime":"2026-01-22T05:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
[... the event-recording cycle and "Node became not ready" condition repeat at 05:19:50.231685-05:19:50.231803 ...]
Jan 22 05:19:50 crc kubenswrapper[4814]: I0122 05:19:50.333622 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 08:07:14.469980267 +0000 UTC
[... the event-recording cycle and "Node became not ready" condition repeat at 05:19:50.334897-05:19:50.335048 ...]
Jan 22 05:19:50 crc kubenswrapper[4814]: I0122 05:19:50.343361 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:19:50 crc kubenswrapper[4814]: E0122 05:19:50.343532 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
[... the event-recording cycle and "Node became not ready" condition repeat at 05:19:50.438381-05:19:50.438559 and 05:19:50.541231-05:19:50.541322 ...]
[... the event-recording cycle and "Node became not ready" condition repeat at 05:19:50.644926, 05:19:50.748023, 05:19:50.851055, 05:19:50.954104, 05:19:51.057043 and 05:19:51.159980 ...]
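Stretches like the one above are dominated by a handful of messages repeating on a roughly 100 ms cadence. A small triage sketch that collapses a kubelet journal into per-message counts with first and last klog timestamps; it assumes one journal entry per line, and the input file name is hypothetical:

import re
import sys
from collections import defaultdict

# Group kubenswrapper lines by their quoted message and report how often,
# and over what interval, each one repeats.
# Usage sketch: python3 collapse.py kubelet.log
pat = re.compile(r'kubenswrapper\[\d+\]: [IEW]\d+ (\d+:\d+:\d+\.\d+) \d+ \S+\] "([^"]+)"')

seen = defaultdict(list)
with open(sys.argv[1]) as fh:
    for line in fh:
        m = pat.search(line)
        if m:
            seen[m.group(2)].append(m.group(1))

for msg, stamps in sorted(seen.items(), key=lambda kv: len(kv[1]), reverse=True):
    print(f"{len(stamps):5d}x  {stamps[0]} .. {stamps[-1]}  {msg}")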
Has your network provider started?"} Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.263155 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.263224 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.263246 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.263277 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.263299 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:51Z","lastTransitionTime":"2026-01-22T05:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.333888 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 23:33:38.978872033 +0000 UTC Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.343248 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.343248 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:51 crc kubenswrapper[4814]: E0122 05:19:51.343479 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.343277 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:51 crc kubenswrapper[4814]: E0122 05:19:51.343547 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:51 crc kubenswrapper[4814]: E0122 05:19:51.343795 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.366731 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.366797 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.366815 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.366845 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.366864 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:51Z","lastTransitionTime":"2026-01-22T05:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.470372 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.470445 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.470467 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.470495 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.470515 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:51Z","lastTransitionTime":"2026-01-22T05:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.573622 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.573686 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.573698 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.573715 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.573729 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:51Z","lastTransitionTime":"2026-01-22T05:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.676369 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.676458 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.676472 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.676489 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.676499 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:51Z","lastTransitionTime":"2026-01-22T05:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.779729 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.779783 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.779800 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.779827 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.779846 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:51Z","lastTransitionTime":"2026-01-22T05:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.882852 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.882920 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.882944 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.882978 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.883002 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:51Z","lastTransitionTime":"2026-01-22T05:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.986410 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.986443 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.986454 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.986469 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:51 crc kubenswrapper[4814]: I0122 05:19:51.986480 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:51Z","lastTransitionTime":"2026-01-22T05:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.092975 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.093021 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.093037 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.093062 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.093078 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:52Z","lastTransitionTime":"2026-01-22T05:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.196060 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.196118 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.196139 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.196164 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.196185 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:52Z","lastTransitionTime":"2026-01-22T05:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.299351 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.299425 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.299447 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.299476 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.299498 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:52Z","lastTransitionTime":"2026-01-22T05:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.334206 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 17:01:47.398925595 +0000 UTC Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.343274 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:52 crc kubenswrapper[4814]: E0122 05:19:52.343451 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.402883 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.402950 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.402975 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.403003 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.403026 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:52Z","lastTransitionTime":"2026-01-22T05:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.505854 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.505928 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.505951 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.505989 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.506016 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:52Z","lastTransitionTime":"2026-01-22T05:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.608908 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.608986 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.609014 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.609045 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.609065 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:52Z","lastTransitionTime":"2026-01-22T05:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.711794 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.711851 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.711869 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.711893 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.711911 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:52Z","lastTransitionTime":"2026-01-22T05:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.814798 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.814852 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.814870 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.814895 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.814913 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:52Z","lastTransitionTime":"2026-01-22T05:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.917762 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.917829 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.917851 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.917880 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:52 crc kubenswrapper[4814]: I0122 05:19:52.917901 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:52Z","lastTransitionTime":"2026-01-22T05:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.020243 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.020288 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.020305 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.020329 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.020346 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:53Z","lastTransitionTime":"2026-01-22T05:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.123774 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.123837 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.123860 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.123893 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.123915 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:53Z","lastTransitionTime":"2026-01-22T05:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.226733 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.226795 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.226811 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.226834 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.226853 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:53Z","lastTransitionTime":"2026-01-22T05:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.329036 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.329088 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.329101 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.329121 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.329134 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:53Z","lastTransitionTime":"2026-01-22T05:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.334565 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 00:53:23.808703962 +0000 UTC Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.342849 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.342878 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.342988 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:53 crc kubenswrapper[4814]: E0122 05:19:53.343079 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:53 crc kubenswrapper[4814]: E0122 05:19:53.343256 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:53 crc kubenswrapper[4814]: E0122 05:19:53.343458 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.432417 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.432491 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.432516 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.432548 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.432573 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:53Z","lastTransitionTime":"2026-01-22T05:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.535251 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.535323 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.535345 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.535373 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.535391 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:53Z","lastTransitionTime":"2026-01-22T05:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.638121 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.638174 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.638188 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.638244 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.638261 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:53Z","lastTransitionTime":"2026-01-22T05:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.741602 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.741710 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.741735 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.741769 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.741787 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:53Z","lastTransitionTime":"2026-01-22T05:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.844811 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.844875 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.844897 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.844927 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.844950 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:53Z","lastTransitionTime":"2026-01-22T05:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.948240 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.948305 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.948323 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.948349 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:53 crc kubenswrapper[4814]: I0122 05:19:53.948367 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:53Z","lastTransitionTime":"2026-01-22T05:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.052701 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.052779 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.052802 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.052831 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.052862 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:54Z","lastTransitionTime":"2026-01-22T05:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.156234 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.156291 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.156314 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.156344 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.156367 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:54Z","lastTransitionTime":"2026-01-22T05:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.259417 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.259476 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.259492 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.259518 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.259536 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:54Z","lastTransitionTime":"2026-01-22T05:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.335254 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 16:01:42.146257422 +0000 UTC Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.344827 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:54 crc kubenswrapper[4814]: E0122 05:19:54.345001 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.364486 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.364534 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.364550 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.364570 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.364588 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:54Z","lastTransitionTime":"2026-01-22T05:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.368011 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.383827 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.401592 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"362cbfbe-caa3-40b7-906c-80c378b01e0c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40207da77534f8f53d912e8b0d0236ccf0917c3d3891637d3f151982efca917d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4tsv9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-f57bg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.419111 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rq55l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"22017d22-7b4d-4e3d-bbae-ff564c64bd7b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f22998162f0ec0e1506bc8201a3ed88f8dc47ae492f47e09ef1ce1ecaf1ed181\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:41Z\\\",\\\"message\\\":\\\"2026-01-22T05:18:56+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94\\\\n2026-01-22T05:18:56+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4669bc82-6e4f-4b56-bc69-c52d8f9ecb94 to /host/opt/cni/bin/\\\\n2026-01-22T05:18:56Z [verbose] multus-daemon started\\\\n2026-01-22T05:18:56Z [verbose] Readiness Indicator file check\\\\n2026-01-22T05:19:41Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55chl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rq55l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.434804 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9aa37e0c-14c8-46c7-8def-981092890be4\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://aba4edaa606d76148f3ab38f35ea09d933a2941ffbc1137e99a7a74f18aa43ab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c4b1170828d5d0e88d3e60432cbf3887c2821eaff1c1540e7e5765d44417c73\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f5fd93f04efc6d4ceb9a6483fe717567857eafa29470e1ca524baa91371d879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0015b5caaae67927a285dfbba2dadaece1a4e174013a8586fb643cccdf80e0df\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.450981 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2d30287d-8a99-47d2-8324-154bbcc54d97\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ec6aacc1738caa10c2a79d5d12c51c27e63f39b893b7a43efdffb80198a749b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef09fd3752bbdfdd752e456937d217fd00ca8fe28ecf38ef3c4478b9d40602c6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://875eb22b4f8c6e0d2c12f921d8fa1acaf07a7fd60a24eff29b1317ff0cdc8f1f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.468380 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.468436 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.468453 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.468478 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 
05:19:54.468496 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:54Z","lastTransitionTime":"2026-01-22T05:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.471784 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.490248 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e91d136d74b9bc9761c27c2d9e1af4a7a19bd686d70f2b02df1636ebe30dd2c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.502128 4814 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78f0ef15-ba39-4f8f-b3df-7fb6671e7a79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a4d48283e77331fa93b8635d20e3499c96b051990e1dbf0817612736c0ecbbf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://993f23b2e9de90d23f547d647d20dddf78ff68f6c59dc540d9890df040eadca9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lrz8p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:08Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fxxmr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.515611 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc3683a5-0a9d-4263-adf7-beb44a5abb94\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cff7a5a68e2e3e125fb9198ce7dcf1b2c7470941e9bfe4206439e2c21f409f4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://800b42dbf9d307e5c329fc25d90f26d545fa25f0774fc8949eafb714343355c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://800b42dbf9d307e5c329fc25d90f26d545fa25f0774fc8949eafb714343355c8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 
crc kubenswrapper[4814]: I0122 05:19:54.531336 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"35389f4a-a247-4636-9091-1e9057355d49\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-d
ir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"message\\\":\\\"W0122 05:18:37.617703 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0122 05:18:37.617982 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769059117 cert, and key in /tmp/serving-cert-69539515/serving-signer.crt, /tmp/serving-cert-69539515/serving-signer.key\\\\nI0122 05:18:38.033244 1 observer_polling.go:159] Starting file observer\\\\nW0122 05:18:53.050460 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0122 05:18:53.050567 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0122 05:18:53.051709 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-69539515/tls.crt::/tmp/serving-cert-69539515/tls.key\\\\\\\"\\\\nI0122 05:18:54.354966 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0122 05:18:54.365081 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0122 05:18:54.365121 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0122 05:18:54.365147 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0122 05:18:54.365152 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0122 05:18:54.416726 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0122 05:18:54.425452 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:34Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.544118 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8718d9d8e34a26e3d22ef882109345ac6d7d86db094331234f3e70b864f8e66d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2af4162622b695c1dbed4f040162a4e7b3944abf225467c64cb5565b19cbe3c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.554844 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vnl4q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1333dbf9-2055-429e-89b1-463b28cff79c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f9e131be92fd78c14d627d32868bba93a3c214afa6f48d743d50cf2081d77f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qgf4r\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vnl4q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.569075 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vf9vr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:19:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-nmwv2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.570451 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.570514 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.570522 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.570555 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.570566 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:54Z","lastTransitionTime":"2026-01-22T05:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.586681 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af3ffd3f7d9efaed3cf3c88f109a6f1eb00efdd4a12f544968ce026dbdf23943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.597998 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-5gzfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2887a737-4338-4fc7-a621-c4d9e74c05ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81a09322c96786f335eb9c4ae1e008e4423f7e7375b9812c35385065a8df21e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4kqgt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:53Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-5gzfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.616487 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"43672f8e-58cc-4665-840f-6477e084f0dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:19:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f582e9601950c4e9901e9604e3a0d3d22c0fe47711e2d55a72a23561a024236b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:19:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://777eb056a6cdfb20b8f6de27dad1b466f93a7ee0ed1acca43ed06fee0be00e50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e905546441f29ca4420c43c7febbf4d9909343273cbb96e43bfca7b01c88e7fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a640dd3a9c1d6be9cb055d1c5d3e3534f518b6566201e8c370753e41b1404f4b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea1bfbe708fa0181692306640607e7443342c987fffe41258d44e39c9d6921ed\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1d51905cdeac76299479d7a19dbbc456b2f646c9ab0582251480d55379c78277\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13f4efe0f01472b23b9e62d8caef010163e5b6deae40460f29a2d9ff748f0a14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:19:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c57bv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gpk6m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.634526 4814 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"55649399-9fd6-4e9a-b249-ce01b498c626\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-22T05:19:46Z\\\",\\\"message\\\":\\\"22 05:19:46.176742 6699 obj_retry.go:303] Retry object setup: *v1.Pod openshift-machine-config-operator/kube-rbac-proxy-crio-crc\\\\nI0122 05:19:46.176751 6699 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/kube-rbac-proxy-crio-crc\\\\nF0122 05:19:46.176758 6699 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:46Z is after 2025-08-24T17:21:41Z]\\\\nI0122 05:19:46.176763 6699 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/kube-rbac-proxy-crio-crc in node crc\\\\nI0122 05:19:46.176769 6699 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operato\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-22T05:19:45Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-22T05:18:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-22T05:18:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-22T05:18:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-q5scm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-22T05:18:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-wvzgj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-22T05:19:54Z is after 2025-08-24T17:21:41Z" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.673588 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.673647 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.673659 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.673678 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.673689 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:54Z","lastTransitionTime":"2026-01-22T05:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.776450 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.776517 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.776543 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.776572 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.776597 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:54Z","lastTransitionTime":"2026-01-22T05:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.880124 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.880204 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.880229 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.880255 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.880271 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:54Z","lastTransitionTime":"2026-01-22T05:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.983526 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.983968 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.984121 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.984275 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:54 crc kubenswrapper[4814]: I0122 05:19:54.984406 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:54Z","lastTransitionTime":"2026-01-22T05:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.087274 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.087519 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.087717 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.087878 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.088026 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:55Z","lastTransitionTime":"2026-01-22T05:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.190752 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.190830 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.190854 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.190887 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.190911 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:55Z","lastTransitionTime":"2026-01-22T05:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.293919 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.293967 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.293979 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.293999 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.294010 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:55Z","lastTransitionTime":"2026-01-22T05:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
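Has your network provider started?"}

The ovnkube-controller crash recorded above bottoms out in a single error: the network-node-identity webhook at https://127.0.0.1:9743 presents a serving certificate that expired 2025-08-24T17:21:41Z, while the node clock reads 2026-01-22. A minimal Go sketch for confirming that diagnosis from the node; only the address is taken from the log, everything else is an illustrative assumption:

// certcheck.go: print the validity window of the webhook's serving certificate.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Skip chain verification deliberately: the point is to inspect the
	// expired certificate, not to reject it the way the kubelet client did.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial webhook: %v", err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Println("subject:  ", cert.Subject)
	fmt.Println("notBefore:", cert.NotBefore.Format(time.RFC3339))
	fmt.Println("notAfter: ", cert.NotAfter.Format(time.RFC3339))
	// For the x509 error above this prints true: 2026-01-22 is past 2025-08-24.
	fmt.Println("expired:  ", time.Now().After(cert.NotAfter))
}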
Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.336282 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 04:41:19.041874581 +0000 UTC
Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.343666 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:19:55 crc kubenswrapper[4814]: E0122 05:19:55.344112 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.343888 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:19:55 crc kubenswrapper[4814]: E0122 05:19:55.344470 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.343703 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2"
Jan 22 05:19:55 crc kubenswrapper[4814]: E0122 05:19:55.344888 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873"
Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.397757 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.398422 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.398619 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.398928 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.399159 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:55Z","lastTransitionTime":"2026-01-22T05:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.502748 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.503453 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.503685 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.503866 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.504033 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:55Z","lastTransitionTime":"2026-01-22T05:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.606741 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.606806 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.606829 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.606859 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.606881 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:55Z","lastTransitionTime":"2026-01-22T05:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.709864 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.709913 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.709931 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.709953 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.709969 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:55Z","lastTransitionTime":"2026-01-22T05:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.812501 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.812907 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.813117 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.813349 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.813547 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:55Z","lastTransitionTime":"2026-01-22T05:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.916589 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.916675 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.916695 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.916723 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:55 crc kubenswrapper[4814]: I0122 05:19:55.916745 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:55Z","lastTransitionTime":"2026-01-22T05:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.018905 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.018962 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.018977 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.019000 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.019015 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:56Z","lastTransitionTime":"2026-01-22T05:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.125840 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.126266 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.126411 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.126577 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.126717 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:56Z","lastTransitionTime":"2026-01-22T05:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.229728 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.229820 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.229839 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.229863 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.229880 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:56Z","lastTransitionTime":"2026-01-22T05:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.332520 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.332572 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.332590 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.332613 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.332650 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:56Z","lastTransitionTime":"2026-01-22T05:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
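Has your network provider started?"}

Every NodeNotReady heartbeat above carries the same runtime message: no CNI configuration file in /etc/kubernetes/cni/net.d/. A rough Go approximation of that readiness test, assuming (a simplification, not kubelet's actual libcni code path) that it reduces to finding at least one network configuration in the conf dir:

// cnicheck.go: report whether the CNI conf dir would satisfy the runtime.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", confDir, err)
		return
	}
	var confs []string
	for _, e := range entries {
		// Extensions libcni accepts for network configurations.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			confs = append(confs, e.Name())
		}
	}
	if len(confs) == 0 {
		fmt.Println("no CNI configuration file found: NetworkReady stays false")
		return
	}
	fmt.Println("CNI configs:", confs)
}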
Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.336694 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 22:33:15.550957959 +0000 UTC
Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.343097 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:19:56 crc kubenswrapper[4814]: E0122 05:19:56.343412 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.435208 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.435273 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.435290 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.435316 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.435334 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:56Z","lastTransitionTime":"2026-01-22T05:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.538726 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.538783 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.538801 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.538827 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.538845 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:56Z","lastTransitionTime":"2026-01-22T05:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.641843 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.641901 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.641918 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.641947 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.641968 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:56Z","lastTransitionTime":"2026-01-22T05:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.745130 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.745175 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.745193 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.745218 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.745234 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:56Z","lastTransitionTime":"2026-01-22T05:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.847688 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.847732 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.847746 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.847768 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.847783 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:56Z","lastTransitionTime":"2026-01-22T05:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.951256 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.951333 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.951355 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.951387 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:56 crc kubenswrapper[4814]: I0122 05:19:56.951410 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:56Z","lastTransitionTime":"2026-01-22T05:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.054447 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.054485 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.054497 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.054519 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.054531 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:57Z","lastTransitionTime":"2026-01-22T05:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.157004 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.157052 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.157062 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.157077 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.157088 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:57Z","lastTransitionTime":"2026-01-22T05:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.186009 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.186218 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.18618601 +0000 UTC m=+147.269674255 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.259960 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.260020 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.260042 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.260071 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.260088 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:57Z","lastTransitionTime":"2026-01-22T05:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
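Has your network provider started?"}

The UnmountVolume failure above is parked for 1m4s (durationBeforeRetry) before the next attempt. That figure fits a capped exponential backoff; the sketch below uses assumed parameters (500ms initial delay, factor 2, ~2m cap), chosen so that repeated failures land on the 64s step seen in the log:

// backoff.go: capped exponential backoff between failed volume operations.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond           // assumed initial delay
	maxDelay := 2*time.Minute + 2*time.Second // assumed cap

	for attempt := 1; attempt <= 9; attempt++ {
		fmt.Printf("attempt %d failed; no retries permitted for %s\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}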
Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.287392 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.287468 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.287568 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.287701 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.287743 4814 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.287890 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.287862983 +0000 UTC m=+147.371351238 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.287754 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.287960 4814 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.288023 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.288004237 +0000 UTC m=+147.371492492 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.287768 4814 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.288102 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.28808906 +0000 UTC m=+147.371577315 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.337068 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 12:55:31.201360614 +0000 UTC Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.343579 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.343659 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.343600 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.343803 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.343933 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.344110 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.363155 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.363213 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.363230 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.363262 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.363281 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:57Z","lastTransitionTime":"2026-01-22T05:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.388932 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.389174 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.389201 4814 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.389221 4814 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:19:57 crc kubenswrapper[4814]: E0122 05:19:57.389289 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.389267398 +0000 UTC m=+147.472755653 (durationBeforeRetry 1m4s). 
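Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]

The certificate_manager lines above quote the same kubelet-serving expiry (2026-02-24 05:53:03 UTC) but a different rotation deadline each time (2025-11-16, 2025-11-17, then 2026-01-06). That is expected: the deadline is re-randomized on every evaluation. A sketch of the jitter, assuming the 70-90% window client-go's certificate manager describes and a hypothetical one-year lifetime:

// rotation.go: jittered certificate rotation deadlines.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	// Uniform point in the [70%, 90%] slice of the validity window, so
	// repeated evaluations (as in the log) yield different deadlines.
	return notBefore.Add(time.Duration(float64(total) * (0.7 + 0.2*rand.Float64())))
}

func main() {
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC) // expiry from the log
	notBefore := notAfter.AddDate(-1, 0, 0)                   // assumed one-year lifetime
	for i := 0; i < 3; i++ {
		fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter).Format(time.RFC3339))
	}
}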
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.467047 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.467126 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.467155 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.467188 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.467214 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:57Z","lastTransitionTime":"2026-01-22T05:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.570527 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.570580 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.570598 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.570671 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.570691 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:57Z","lastTransitionTime":"2026-01-22T05:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.673524 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.673595 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.673618 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.673687 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.673710 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:57Z","lastTransitionTime":"2026-01-22T05:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.776723 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.776786 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.776803 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.776828 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.776844 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:57Z","lastTransitionTime":"2026-01-22T05:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.880975 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.881043 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.881066 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.881098 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.881123 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:57Z","lastTransitionTime":"2026-01-22T05:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.984081 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.984150 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.984181 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.984201 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:57 crc kubenswrapper[4814]: I0122 05:19:57.984214 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:57Z","lastTransitionTime":"2026-01-22T05:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.086656 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.086730 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.086743 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.086763 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.086774 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:58Z","lastTransitionTime":"2026-01-22T05:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.189828 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.189891 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.189909 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.189931 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.189948 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:58Z","lastTransitionTime":"2026-01-22T05:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.293254 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.293311 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.293329 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.293352 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.293369 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:58Z","lastTransitionTime":"2026-01-22T05:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.338011 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 05:01:32.420872235 +0000 UTC Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.343426 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:19:58 crc kubenswrapper[4814]: E0122 05:19:58.343674 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.396140 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.396276 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.396296 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.396321 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.396338 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:58Z","lastTransitionTime":"2026-01-22T05:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.500717 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.500774 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.500793 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.500821 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.500852 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:58Z","lastTransitionTime":"2026-01-22T05:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.604484 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.604549 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.604567 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.604594 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.604612 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:58Z","lastTransitionTime":"2026-01-22T05:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.708274 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.708328 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.708346 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.708369 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.708384 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:58Z","lastTransitionTime":"2026-01-22T05:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.810991 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.811052 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.811069 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.811092 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.811110 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:58Z","lastTransitionTime":"2026-01-22T05:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.915410 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.915472 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.915498 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.915533 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:58 crc kubenswrapper[4814]: I0122 05:19:58.915557 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:58Z","lastTransitionTime":"2026-01-22T05:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.018725 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.018794 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.018818 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.018849 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.018874 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:59Z","lastTransitionTime":"2026-01-22T05:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.121586 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.121685 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.121705 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.121729 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.121748 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:59Z","lastTransitionTime":"2026-01-22T05:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.225256 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.225315 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.225332 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.225356 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.225374 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:59Z","lastTransitionTime":"2026-01-22T05:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.328478 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.328541 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.328558 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.328585 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.328603 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:59Z","lastTransitionTime":"2026-01-22T05:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.338709 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 19:48:15.525952356 +0000 UTC Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.343142 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.343207 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:19:59 crc kubenswrapper[4814]: E0122 05:19:59.343339 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.343385 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:19:59 crc kubenswrapper[4814]: E0122 05:19:59.343505 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:19:59 crc kubenswrapper[4814]: E0122 05:19:59.343705 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.344732 4814 scope.go:117] "RemoveContainer" containerID="4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb" Jan 22 05:19:59 crc kubenswrapper[4814]: E0122 05:19:59.344987 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.431598 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.431730 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.431750 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.432156 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.432216 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:59Z","lastTransitionTime":"2026-01-22T05:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.521249 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.521324 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.521346 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.521383 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.521406 4814 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-22T05:19:59Z","lastTransitionTime":"2026-01-22T05:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.597579 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x"] Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.598161 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.603053 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.603162 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.603186 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.603420 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.613926 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/6bcd531a-d88e-4a15-b9dc-955bd596c181-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.614060 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6bcd531a-d88e-4a15-b9dc-955bd596c181-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.614106 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/6bcd531a-d88e-4a15-b9dc-955bd596c181-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.614139 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6bcd531a-d88e-4a15-b9dc-955bd596c181-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.614190 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6bcd531a-d88e-4a15-b9dc-955bd596c181-service-ca\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.631009 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=14.630983067 podStartE2EDuration="14.630983067s" podCreationTimestamp="2026-01-22 05:19:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-22 05:19:59.630884694 +0000 UTC m=+85.714372949" watchObservedRunningTime="2026-01-22 05:19:59.630983067 +0000 UTC m=+85.714471322" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.679544 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=65.679517834 podStartE2EDuration="1m5.679517834s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:19:59.654950926 +0000 UTC m=+85.738439201" watchObservedRunningTime="2026-01-22 05:19:59.679517834 +0000 UTC m=+85.763006079" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.714887 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/6bcd531a-d88e-4a15-b9dc-955bd596c181-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.714979 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6bcd531a-d88e-4a15-b9dc-955bd596c181-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.715019 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/6bcd531a-d88e-4a15-b9dc-955bd596c181-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.715053 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6bcd531a-d88e-4a15-b9dc-955bd596c181-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.715104 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6bcd531a-d88e-4a15-b9dc-955bd596c181-service-ca\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.715107 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/6bcd531a-d88e-4a15-b9dc-955bd596c181-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.715180 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: 
\"kubernetes.io/host-path/6bcd531a-d88e-4a15-b9dc-955bd596c181-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.716794 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6bcd531a-d88e-4a15-b9dc-955bd596c181-service-ca\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.720824 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-vnl4q" podStartSLOduration=66.720802139 podStartE2EDuration="1m6.720802139s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:19:59.701541517 +0000 UTC m=+85.785029762" watchObservedRunningTime="2026-01-22 05:19:59.720802139 +0000 UTC m=+85.804290384" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.726818 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6bcd531a-d88e-4a15-b9dc-955bd596c181-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.751129 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-5gzfx" podStartSLOduration=66.751107367 podStartE2EDuration="1m6.751107367s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:19:59.750433476 +0000 UTC m=+85.833921701" watchObservedRunningTime="2026-01-22 05:19:59.751107367 +0000 UTC m=+85.834595622" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.762695 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6bcd531a-d88e-4a15-b9dc-955bd596c181-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-c9m2x\" (UID: \"6bcd531a-d88e-4a15-b9dc-955bd596c181\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.783514 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-gpk6m" podStartSLOduration=66.783496626 podStartE2EDuration="1m6.783496626s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:19:59.782877518 +0000 UTC m=+85.866365753" watchObservedRunningTime="2026-01-22 05:19:59.783496626 +0000 UTC m=+85.866984841" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.886392 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podStartSLOduration=66.886376495 podStartE2EDuration="1m6.886376495s" podCreationTimestamp="2026-01-22 05:18:53 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:19:59.885341275 +0000 UTC m=+85.968829500" watchObservedRunningTime="2026-01-22 05:19:59.886376495 +0000 UTC m=+85.969864730" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.920349 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-rq55l" podStartSLOduration=66.920322791 podStartE2EDuration="1m6.920322791s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:19:59.903466962 +0000 UTC m=+85.986955187" watchObservedRunningTime="2026-01-22 05:19:59.920322791 +0000 UTC m=+86.003811016" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.921281 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=30.92127278 podStartE2EDuration="30.92127278s" podCreationTimestamp="2026-01-22 05:19:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:19:59.920109555 +0000 UTC m=+86.003597770" watchObservedRunningTime="2026-01-22 05:19:59.92127278 +0000 UTC m=+86.004761015" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.924083 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" Jan 22 05:19:59 crc kubenswrapper[4814]: I0122 05:19:59.944575 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=60.94455586 podStartE2EDuration="1m0.94455586s" podCreationTimestamp="2026-01-22 05:18:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:19:59.943786907 +0000 UTC m=+86.027275132" watchObservedRunningTime="2026-01-22 05:19:59.94455586 +0000 UTC m=+86.028044075" Jan 22 05:20:00 crc kubenswrapper[4814]: I0122 05:20:00.339114 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 22:12:46.825886783 +0000 UTC Jan 22 05:20:00 crc kubenswrapper[4814]: I0122 05:20:00.339216 4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 22 05:20:00 crc kubenswrapper[4814]: I0122 05:20:00.343507 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:00 crc kubenswrapper[4814]: E0122 05:20:00.343739 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:00 crc kubenswrapper[4814]: I0122 05:20:00.352976 4814 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 22 05:20:00 crc kubenswrapper[4814]: I0122 05:20:00.367414 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fxxmr" podStartSLOduration=66.367389802 podStartE2EDuration="1m6.367389802s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:00.009841654 +0000 UTC m=+86.093329869" watchObservedRunningTime="2026-01-22 05:20:00.367389802 +0000 UTC m=+86.450878037" Jan 22 05:20:00 crc kubenswrapper[4814]: I0122 05:20:00.369732 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 22 05:20:00 crc kubenswrapper[4814]: I0122 05:20:00.926146 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" event={"ID":"6bcd531a-d88e-4a15-b9dc-955bd596c181","Type":"ContainerStarted","Data":"32cd7966b077fd24eeade836ce88450d4b851793c71d38a9550facdcba0dbecb"} Jan 22 05:20:00 crc kubenswrapper[4814]: I0122 05:20:00.926216 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" event={"ID":"6bcd531a-d88e-4a15-b9dc-955bd596c181","Type":"ContainerStarted","Data":"c005686dfa058271b46e626357ec2792b482e34afe8c10f2039a087ce70bb648"} Jan 22 05:20:00 crc kubenswrapper[4814]: I0122 05:20:00.967726 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=0.967697593 podStartE2EDuration="967.697593ms" podCreationTimestamp="2026-01-22 05:20:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:00.962919511 +0000 UTC m=+87.046407786" watchObservedRunningTime="2026-01-22 05:20:00.967697593 +0000 UTC m=+87.051185848" Jan 22 05:20:00 crc kubenswrapper[4814]: I0122 05:20:00.987782 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-c9m2x" podStartSLOduration=67.987756357 podStartE2EDuration="1m7.987756357s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:00.985586693 +0000 UTC m=+87.069074988" watchObservedRunningTime="2026-01-22 05:20:00.987756357 +0000 UTC m=+87.071244612" Jan 22 05:20:01 crc kubenswrapper[4814]: I0122 05:20:01.343706 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:01 crc kubenswrapper[4814]: I0122 05:20:01.343761 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:01 crc kubenswrapper[4814]: I0122 05:20:01.343706 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:01 crc kubenswrapper[4814]: E0122 05:20:01.343836 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:01 crc kubenswrapper[4814]: E0122 05:20:01.343985 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:01 crc kubenswrapper[4814]: E0122 05:20:01.344033 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:02 crc kubenswrapper[4814]: I0122 05:20:02.342958 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:02 crc kubenswrapper[4814]: E0122 05:20:02.343162 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:03 crc kubenswrapper[4814]: I0122 05:20:03.343524 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:03 crc kubenswrapper[4814]: I0122 05:20:03.343533 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:03 crc kubenswrapper[4814]: I0122 05:20:03.343592 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:03 crc kubenswrapper[4814]: E0122 05:20:03.344154 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:03 crc kubenswrapper[4814]: E0122 05:20:03.344430 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:03 crc kubenswrapper[4814]: E0122 05:20:03.344520 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:04 crc kubenswrapper[4814]: I0122 05:20:04.343957 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:04 crc kubenswrapper[4814]: E0122 05:20:04.345928 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:05 crc kubenswrapper[4814]: I0122 05:20:05.343605 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:05 crc kubenswrapper[4814]: I0122 05:20:05.343664 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:05 crc kubenswrapper[4814]: E0122 05:20:05.343846 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:05 crc kubenswrapper[4814]: E0122 05:20:05.343956 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:05 crc kubenswrapper[4814]: I0122 05:20:05.343669 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:05 crc kubenswrapper[4814]: E0122 05:20:05.344992 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:06 crc kubenswrapper[4814]: I0122 05:20:06.343803 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:06 crc kubenswrapper[4814]: E0122 05:20:06.344034 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:07 crc kubenswrapper[4814]: I0122 05:20:07.342905 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:07 crc kubenswrapper[4814]: I0122 05:20:07.342966 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:07 crc kubenswrapper[4814]: E0122 05:20:07.343098 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:07 crc kubenswrapper[4814]: I0122 05:20:07.343140 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:07 crc kubenswrapper[4814]: E0122 05:20:07.343252 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:07 crc kubenswrapper[4814]: E0122 05:20:07.343367 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:08 crc kubenswrapper[4814]: I0122 05:20:08.342773 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:08 crc kubenswrapper[4814]: E0122 05:20:08.342968 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:09 crc kubenswrapper[4814]: I0122 05:20:09.343062 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:09 crc kubenswrapper[4814]: I0122 05:20:09.343171 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:09 crc kubenswrapper[4814]: I0122 05:20:09.343231 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:09 crc kubenswrapper[4814]: E0122 05:20:09.343481 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:09 crc kubenswrapper[4814]: E0122 05:20:09.343660 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:09 crc kubenswrapper[4814]: E0122 05:20:09.343873 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:10 crc kubenswrapper[4814]: I0122 05:20:10.343431 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:10 crc kubenswrapper[4814]: I0122 05:20:10.343780 4814 scope.go:117] "RemoveContainer" containerID="4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb" Jan 22 05:20:10 crc kubenswrapper[4814]: E0122 05:20:10.344036 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" Jan 22 05:20:10 crc kubenswrapper[4814]: E0122 05:20:10.344244 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:11 crc kubenswrapper[4814]: I0122 05:20:11.343163 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:11 crc kubenswrapper[4814]: I0122 05:20:11.343222 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:11 crc kubenswrapper[4814]: E0122 05:20:11.343257 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:11 crc kubenswrapper[4814]: I0122 05:20:11.343328 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:11 crc kubenswrapper[4814]: E0122 05:20:11.343447 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:11 crc kubenswrapper[4814]: E0122 05:20:11.343686 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:12 crc kubenswrapper[4814]: I0122 05:20:12.343085 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:12 crc kubenswrapper[4814]: E0122 05:20:12.343260 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:13 crc kubenswrapper[4814]: I0122 05:20:13.281838 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:13 crc kubenswrapper[4814]: E0122 05:20:13.282130 4814 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:20:13 crc kubenswrapper[4814]: E0122 05:20:13.282259 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs podName:33d4bb42-6c3b-4a42-bf7b-bb9a780f7873 nodeName:}" failed. No retries permitted until 2026-01-22 05:21:17.282227918 +0000 UTC m=+163.365716163 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs") pod "network-metrics-daemon-nmwv2" (UID: "33d4bb42-6c3b-4a42-bf7b-bb9a780f7873") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 22 05:20:13 crc kubenswrapper[4814]: I0122 05:20:13.343517 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:13 crc kubenswrapper[4814]: I0122 05:20:13.343550 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:13 crc kubenswrapper[4814]: I0122 05:20:13.343686 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:13 crc kubenswrapper[4814]: E0122 05:20:13.343760 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:13 crc kubenswrapper[4814]: E0122 05:20:13.343894 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:13 crc kubenswrapper[4814]: E0122 05:20:13.344048 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:14 crc kubenswrapper[4814]: I0122 05:20:14.343971 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:14 crc kubenswrapper[4814]: E0122 05:20:14.346174 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:15 crc kubenswrapper[4814]: I0122 05:20:15.343169 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:15 crc kubenswrapper[4814]: I0122 05:20:15.343233 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:15 crc kubenswrapper[4814]: I0122 05:20:15.343233 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:15 crc kubenswrapper[4814]: E0122 05:20:15.343366 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:15 crc kubenswrapper[4814]: E0122 05:20:15.343587 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:15 crc kubenswrapper[4814]: E0122 05:20:15.343744 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:16 crc kubenswrapper[4814]: I0122 05:20:16.343145 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:16 crc kubenswrapper[4814]: E0122 05:20:16.343698 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:17 crc kubenswrapper[4814]: I0122 05:20:17.342695 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:17 crc kubenswrapper[4814]: I0122 05:20:17.342816 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:17 crc kubenswrapper[4814]: I0122 05:20:17.342695 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:17 crc kubenswrapper[4814]: E0122 05:20:17.342889 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:17 crc kubenswrapper[4814]: E0122 05:20:17.343007 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:17 crc kubenswrapper[4814]: E0122 05:20:17.343141 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:18 crc kubenswrapper[4814]: I0122 05:20:18.343553 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:18 crc kubenswrapper[4814]: E0122 05:20:18.343806 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:19 crc kubenswrapper[4814]: I0122 05:20:19.342980 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:19 crc kubenswrapper[4814]: I0122 05:20:19.343061 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:19 crc kubenswrapper[4814]: I0122 05:20:19.343061 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:19 crc kubenswrapper[4814]: E0122 05:20:19.343155 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:19 crc kubenswrapper[4814]: E0122 05:20:19.343272 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:19 crc kubenswrapper[4814]: E0122 05:20:19.343372 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:20 crc kubenswrapper[4814]: I0122 05:20:20.343034 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:20 crc kubenswrapper[4814]: E0122 05:20:20.343224 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:21 crc kubenswrapper[4814]: I0122 05:20:21.342833 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:21 crc kubenswrapper[4814]: I0122 05:20:21.342833 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:21 crc kubenswrapper[4814]: E0122 05:20:21.343535 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:21 crc kubenswrapper[4814]: I0122 05:20:21.342860 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:21 crc kubenswrapper[4814]: E0122 05:20:21.343774 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:21 crc kubenswrapper[4814]: E0122 05:20:21.344071 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:22 crc kubenswrapper[4814]: I0122 05:20:22.343679 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:22 crc kubenswrapper[4814]: E0122 05:20:22.343882 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:22 crc kubenswrapper[4814]: I0122 05:20:22.345210 4814 scope.go:117] "RemoveContainer" containerID="4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb" Jan 22 05:20:22 crc kubenswrapper[4814]: E0122 05:20:22.345461 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-wvzgj_openshift-ovn-kubernetes(55649399-9fd6-4e9a-b249-ce01b498c626)\"" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" Jan 22 05:20:23 crc kubenswrapper[4814]: I0122 05:20:23.343431 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:23 crc kubenswrapper[4814]: I0122 05:20:23.343476 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:23 crc kubenswrapper[4814]: I0122 05:20:23.343476 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:23 crc kubenswrapper[4814]: E0122 05:20:23.343928 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:23 crc kubenswrapper[4814]: E0122 05:20:23.343675 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:23 crc kubenswrapper[4814]: E0122 05:20:23.344029 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:24 crc kubenswrapper[4814]: I0122 05:20:24.343519 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:24 crc kubenswrapper[4814]: E0122 05:20:24.345512 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:25 crc kubenswrapper[4814]: I0122 05:20:25.343181 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:25 crc kubenswrapper[4814]: I0122 05:20:25.343227 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:25 crc kubenswrapper[4814]: I0122 05:20:25.343244 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:25 crc kubenswrapper[4814]: E0122 05:20:25.343393 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:25 crc kubenswrapper[4814]: E0122 05:20:25.343879 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:25 crc kubenswrapper[4814]: E0122 05:20:25.344094 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:26 crc kubenswrapper[4814]: I0122 05:20:26.343084 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:26 crc kubenswrapper[4814]: E0122 05:20:26.343330 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:27 crc kubenswrapper[4814]: I0122 05:20:27.342778 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:27 crc kubenswrapper[4814]: I0122 05:20:27.342894 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:27 crc kubenswrapper[4814]: I0122 05:20:27.342912 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:27 crc kubenswrapper[4814]: E0122 05:20:27.342964 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:27 crc kubenswrapper[4814]: E0122 05:20:27.343044 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:27 crc kubenswrapper[4814]: E0122 05:20:27.343232 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:28 crc kubenswrapper[4814]: I0122 05:20:28.019974 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rq55l_22017d22-7b4d-4e3d-bbae-ff564c64bd7b/kube-multus/1.log" Jan 22 05:20:28 crc kubenswrapper[4814]: I0122 05:20:28.020561 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rq55l_22017d22-7b4d-4e3d-bbae-ff564c64bd7b/kube-multus/0.log" Jan 22 05:20:28 crc kubenswrapper[4814]: I0122 05:20:28.020612 4814 generic.go:334] "Generic (PLEG): container finished" podID="22017d22-7b4d-4e3d-bbae-ff564c64bd7b" containerID="f22998162f0ec0e1506bc8201a3ed88f8dc47ae492f47e09ef1ce1ecaf1ed181" exitCode=1 Jan 22 05:20:28 crc kubenswrapper[4814]: I0122 05:20:28.020667 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rq55l" event={"ID":"22017d22-7b4d-4e3d-bbae-ff564c64bd7b","Type":"ContainerDied","Data":"f22998162f0ec0e1506bc8201a3ed88f8dc47ae492f47e09ef1ce1ecaf1ed181"} Jan 22 05:20:28 crc kubenswrapper[4814]: I0122 05:20:28.020714 4814 scope.go:117] "RemoveContainer" containerID="1d48737f9560497d2f560c226c0cce33330e915093aa67b9391e71b8122e1abc" Jan 22 05:20:28 crc kubenswrapper[4814]: I0122 05:20:28.021093 4814 scope.go:117] "RemoveContainer" containerID="f22998162f0ec0e1506bc8201a3ed88f8dc47ae492f47e09ef1ce1ecaf1ed181" Jan 22 05:20:28 crc kubenswrapper[4814]: E0122 05:20:28.021306 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-rq55l_openshift-multus(22017d22-7b4d-4e3d-bbae-ff564c64bd7b)\"" pod="openshift-multus/multus-rq55l" podUID="22017d22-7b4d-4e3d-bbae-ff564c64bd7b" Jan 22 05:20:28 crc kubenswrapper[4814]: I0122 05:20:28.343523 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:28 crc kubenswrapper[4814]: E0122 05:20:28.343785 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:29 crc kubenswrapper[4814]: I0122 05:20:29.027322 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rq55l_22017d22-7b4d-4e3d-bbae-ff564c64bd7b/kube-multus/1.log" Jan 22 05:20:29 crc kubenswrapper[4814]: I0122 05:20:29.343148 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:29 crc kubenswrapper[4814]: I0122 05:20:29.343203 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:29 crc kubenswrapper[4814]: I0122 05:20:29.343163 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:29 crc kubenswrapper[4814]: E0122 05:20:29.343323 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:29 crc kubenswrapper[4814]: E0122 05:20:29.343658 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:29 crc kubenswrapper[4814]: E0122 05:20:29.343794 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:30 crc kubenswrapper[4814]: I0122 05:20:30.343688 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:30 crc kubenswrapper[4814]: E0122 05:20:30.343913 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:31 crc kubenswrapper[4814]: I0122 05:20:31.342741 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:31 crc kubenswrapper[4814]: I0122 05:20:31.342819 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:31 crc kubenswrapper[4814]: I0122 05:20:31.342825 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:31 crc kubenswrapper[4814]: E0122 05:20:31.342907 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:31 crc kubenswrapper[4814]: E0122 05:20:31.342976 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:31 crc kubenswrapper[4814]: E0122 05:20:31.343141 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:32 crc kubenswrapper[4814]: I0122 05:20:32.343340 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:32 crc kubenswrapper[4814]: E0122 05:20:32.343714 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:33 crc kubenswrapper[4814]: I0122 05:20:33.343354 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:33 crc kubenswrapper[4814]: I0122 05:20:33.343432 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:33 crc kubenswrapper[4814]: I0122 05:20:33.343447 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:33 crc kubenswrapper[4814]: E0122 05:20:33.343540 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:33 crc kubenswrapper[4814]: E0122 05:20:33.343738 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:33 crc kubenswrapper[4814]: E0122 05:20:33.343936 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:34 crc kubenswrapper[4814]: E0122 05:20:34.293116 4814 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 22 05:20:34 crc kubenswrapper[4814]: I0122 05:20:34.342779 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:34 crc kubenswrapper[4814]: E0122 05:20:34.344741 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:34 crc kubenswrapper[4814]: E0122 05:20:34.443494 4814 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 22 05:20:35 crc kubenswrapper[4814]: I0122 05:20:35.343482 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:35 crc kubenswrapper[4814]: E0122 05:20:35.343721 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:35 crc kubenswrapper[4814]: I0122 05:20:35.343503 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:35 crc kubenswrapper[4814]: E0122 05:20:35.343868 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:35 crc kubenswrapper[4814]: I0122 05:20:35.343503 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:35 crc kubenswrapper[4814]: E0122 05:20:35.343964 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:36 crc kubenswrapper[4814]: I0122 05:20:36.343229 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:36 crc kubenswrapper[4814]: E0122 05:20:36.344857 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:37 crc kubenswrapper[4814]: I0122 05:20:37.343077 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:37 crc kubenswrapper[4814]: I0122 05:20:37.343210 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:37 crc kubenswrapper[4814]: E0122 05:20:37.343412 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:37 crc kubenswrapper[4814]: I0122 05:20:37.343468 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:37 crc kubenswrapper[4814]: E0122 05:20:37.343737 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:37 crc kubenswrapper[4814]: E0122 05:20:37.343822 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:37 crc kubenswrapper[4814]: I0122 05:20:37.346012 4814 scope.go:117] "RemoveContainer" containerID="4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb" Jan 22 05:20:38 crc kubenswrapper[4814]: I0122 05:20:38.064294 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/3.log" Jan 22 05:20:38 crc kubenswrapper[4814]: I0122 05:20:38.067216 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerStarted","Data":"74c22517da4f736a98526fca6fa3436f7c2cba2f848f165c31d69f178637895f"} Jan 22 05:20:38 crc kubenswrapper[4814]: I0122 05:20:38.067913 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:20:38 crc kubenswrapper[4814]: I0122 05:20:38.099328 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podStartSLOduration=104.099269262 podStartE2EDuration="1m44.099269262s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:38.096942243 +0000 UTC m=+124.180430528" watchObservedRunningTime="2026-01-22 05:20:38.099269262 +0000 UTC m=+124.182757517" Jan 22 05:20:38 crc kubenswrapper[4814]: I0122 05:20:38.342990 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:38 crc kubenswrapper[4814]: E0122 05:20:38.343412 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:38 crc kubenswrapper[4814]: I0122 05:20:38.343442 4814 scope.go:117] "RemoveContainer" containerID="f22998162f0ec0e1506bc8201a3ed88f8dc47ae492f47e09ef1ce1ecaf1ed181" Jan 22 05:20:38 crc kubenswrapper[4814]: I0122 05:20:38.418860 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-nmwv2"] Jan 22 05:20:38 crc kubenswrapper[4814]: I0122 05:20:38.418947 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:38 crc kubenswrapper[4814]: E0122 05:20:38.419025 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:39 crc kubenswrapper[4814]: I0122 05:20:39.073705 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rq55l_22017d22-7b4d-4e3d-bbae-ff564c64bd7b/kube-multus/1.log" Jan 22 05:20:39 crc kubenswrapper[4814]: I0122 05:20:39.074130 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rq55l" event={"ID":"22017d22-7b4d-4e3d-bbae-ff564c64bd7b","Type":"ContainerStarted","Data":"dea1d487fb592deca0be2c7b5b5a107858c92384301dc9ef3976e3456777ab8e"} Jan 22 05:20:39 crc kubenswrapper[4814]: I0122 05:20:39.343353 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:39 crc kubenswrapper[4814]: I0122 05:20:39.343397 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:39 crc kubenswrapper[4814]: E0122 05:20:39.343530 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:39 crc kubenswrapper[4814]: E0122 05:20:39.343694 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:39 crc kubenswrapper[4814]: E0122 05:20:39.445063 4814 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 22 05:20:40 crc kubenswrapper[4814]: I0122 05:20:40.343157 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:40 crc kubenswrapper[4814]: I0122 05:20:40.343226 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:40 crc kubenswrapper[4814]: E0122 05:20:40.343351 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:40 crc kubenswrapper[4814]: E0122 05:20:40.343443 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:41 crc kubenswrapper[4814]: I0122 05:20:41.343459 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:41 crc kubenswrapper[4814]: I0122 05:20:41.343464 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:41 crc kubenswrapper[4814]: E0122 05:20:41.343678 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:41 crc kubenswrapper[4814]: E0122 05:20:41.343760 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:42 crc kubenswrapper[4814]: I0122 05:20:42.343393 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:42 crc kubenswrapper[4814]: I0122 05:20:42.343457 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:42 crc kubenswrapper[4814]: E0122 05:20:42.343621 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:42 crc kubenswrapper[4814]: E0122 05:20:42.343788 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:43 crc kubenswrapper[4814]: I0122 05:20:43.343573 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:43 crc kubenswrapper[4814]: I0122 05:20:43.343573 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:43 crc kubenswrapper[4814]: E0122 05:20:43.343796 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 22 05:20:43 crc kubenswrapper[4814]: E0122 05:20:43.343874 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 22 05:20:44 crc kubenswrapper[4814]: I0122 05:20:44.343159 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:44 crc kubenswrapper[4814]: I0122 05:20:44.343304 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:44 crc kubenswrapper[4814]: E0122 05:20:44.345221 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nmwv2" podUID="33d4bb42-6c3b-4a42-bf7b-bb9a780f7873" Jan 22 05:20:44 crc kubenswrapper[4814]: E0122 05:20:44.345325 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 22 05:20:45 crc kubenswrapper[4814]: I0122 05:20:45.343776 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 22 05:20:45 crc kubenswrapper[4814]: I0122 05:20:45.343781 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:20:45 crc kubenswrapper[4814]: I0122 05:20:45.347181 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 22 05:20:45 crc kubenswrapper[4814]: I0122 05:20:45.348198 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 22 05:20:46 crc kubenswrapper[4814]: I0122 05:20:46.343657 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 22 05:20:46 crc kubenswrapper[4814]: I0122 05:20:46.343666 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:20:46 crc kubenswrapper[4814]: I0122 05:20:46.348221 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 22 05:20:46 crc kubenswrapper[4814]: I0122 05:20:46.348371 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 22 05:20:46 crc kubenswrapper[4814]: I0122 05:20:46.349916 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 22 05:20:46 crc kubenswrapper[4814]: I0122 05:20:46.351602 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 22 05:20:48 crc kubenswrapper[4814]: I0122 05:20:48.419740 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.661528 4814 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.711031 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hvr97"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.712381 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.714054 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.714772 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.724020 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.724207 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.724279 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.724326 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.724435 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.724494 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.724754 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.724850 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.725126 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.725574 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.725710 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.725944 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.725948 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.726196 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.726335 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.727262 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ggbzf"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.732509 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.748135 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.761804 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.762451 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.762748 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.762933 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.763129 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.763173 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.765359 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/70a9137f-7cb9-429a-b0fb-76f7184e7936-audit-dir\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.765419 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.765473 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqv8k\" (UniqueName: \"kubernetes.io/projected/70a9137f-7cb9-429a-b0fb-76f7184e7936-kube-api-access-lqv8k\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.765502 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.765532 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-client-ca\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.765564 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-etcd-serving-ca\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.765615 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-config\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.765675 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/47d42067-0194-4d5d-8cc8-a49e9065bc9b-client-ca\") pod \"route-controller-manager-6576b87f9c-c9hn7\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.765952 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/70a9137f-7cb9-429a-b0fb-76f7184e7936-encryption-config\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766015 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/70a9137f-7cb9-429a-b0fb-76f7184e7936-node-pullsecrets\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766046 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/70a9137f-7cb9-429a-b0fb-76f7184e7936-serving-cert\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766097 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47d42067-0194-4d5d-8cc8-a49e9065bc9b-config\") pod \"route-controller-manager-6576b87f9c-c9hn7\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766128 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/47d42067-0194-4d5d-8cc8-a49e9065bc9b-serving-cert\") pod \"route-controller-manager-6576b87f9c-c9hn7\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766162 4814 
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766162 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kgsm\" (UniqueName: \"kubernetes.io/projected/47d42067-0194-4d5d-8cc8-a49e9065bc9b-kube-api-access-5kgsm\") pod \"route-controller-manager-6576b87f9c-c9hn7\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766190 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-image-import-ca\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766225 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-config\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766264 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5388d463-7ff7-4465-ab69-3d0015d91232-serving-cert\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766293 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxcw5\" (UniqueName: \"kubernetes.io/projected/5388d463-7ff7-4465-ab69-3d0015d91232-kube-api-access-mxcw5\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766340 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766394 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-audit\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766452 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/70a9137f-7cb9-429a-b0fb-76f7184e7936-etcd-client\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766481 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766591 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766727 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766873 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.766885 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.767143 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.767179 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.767824 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.768044 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.770920 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.771108 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.771709 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.771860 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.772001 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.776083 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-t5hkb"]
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.776609 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.778339 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.779768 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-kst8c"]
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.782307 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-fsdht"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.782837 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-h7dw7"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.783175 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.783550 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-fsdht" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.787783 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.788235 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-mhktc"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.788544 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.788946 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.788988 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.803797 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.804119 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.804163 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.804218 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.804281 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.804284 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.804339 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.804400 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.804520 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 22 05:20:50 crc 
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.804685 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.805237 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.805416 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.805560 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.805755 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.805959 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.806059 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.806093 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.806197 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.806261 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.808366 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-frrqc"]
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.808981 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.809120 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.809213 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.809330 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-jnnrg"]
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.809226 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.809655 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-jnnrg"
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.809570 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.810217 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hvr97"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.810284 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-bmlds"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.810573 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.810771 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.810832 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.810944 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.811243 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.810773 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.811551 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.811577 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-b86l9"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.812115 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.812128 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.812136 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.812382 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.812403 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.812503 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.813433 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.810774 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.814906 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-b86l9" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.815163 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.815460 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kt2c2"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.815792 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.816134 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.834159 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.836324 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-tmm6k"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.836459 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.837789 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.866230 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.866585 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.866776 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.867018 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.867102 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.867237 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tmm6k" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.867345 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.867518 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.867690 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.867878 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870402 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kgsm\" (UniqueName: \"kubernetes.io/projected/47d42067-0194-4d5d-8cc8-a49e9065bc9b-kube-api-access-5kgsm\") pod \"route-controller-manager-6576b87f9c-c9hn7\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870438 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-image-import-ca\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870465 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-config\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870495 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5388d463-7ff7-4465-ab69-3d0015d91232-serving-cert\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870514 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxcw5\" (UniqueName: \"kubernetes.io/projected/5388d463-7ff7-4465-ab69-3d0015d91232-kube-api-access-mxcw5\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870543 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-audit\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870562 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/70a9137f-7cb9-429a-b0fb-76f7184e7936-etcd-client\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870584 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/70a9137f-7cb9-429a-b0fb-76f7184e7936-audit-dir\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870614 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870650 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqv8k\" (UniqueName: \"kubernetes.io/projected/70a9137f-7cb9-429a-b0fb-76f7184e7936-kube-api-access-lqv8k\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870670 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-client-ca\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870690 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870709 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-etcd-serving-ca\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870740 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-config\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870762 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/47d42067-0194-4d5d-8cc8-a49e9065bc9b-client-ca\") pod \"route-controller-manager-6576b87f9c-c9hn7\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870785 4814 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/70a9137f-7cb9-429a-b0fb-76f7184e7936-encryption-config\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870808 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/70a9137f-7cb9-429a-b0fb-76f7184e7936-node-pullsecrets\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870825 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/70a9137f-7cb9-429a-b0fb-76f7184e7936-serving-cert\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870857 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47d42067-0194-4d5d-8cc8-a49e9065bc9b-config\") pod \"route-controller-manager-6576b87f9c-c9hn7\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.870878 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/47d42067-0194-4d5d-8cc8-a49e9065bc9b-serving-cert\") pod \"route-controller-manager-6576b87f9c-c9hn7\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.871173 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.871371 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.871489 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.871552 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.871599 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.871724 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.871804 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.871915 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.871994 4814 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-console"/"oauth-serving-cert" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.872088 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.872173 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.872320 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.872419 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.872511 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.872585 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.872822 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-image-import-ca\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.873138 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.873229 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.873330 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.873408 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.871730 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.873552 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.873654 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.873752 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.874232 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.874279 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.874343 4814 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-console"/"default-dockercfg-chnjx" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.875591 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.879501 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.879778 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.880009 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.880477 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.880795 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.881325 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.881542 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.882228 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.883512 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.887940 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.888199 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.894808 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-config\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.895076 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.896156 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:50 crc kubenswrapper[4814]: 
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.896604 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-audit\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.899974 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd"]
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.900372 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-g9h6j"]
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.900732 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-xd5fb"]
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.900919 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/70a9137f-7cb9-429a-b0fb-76f7184e7936-audit-dir\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.901079 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-sshbr"]
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.901183 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.901351 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-trusted-ca-bundle\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.901493 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6"]
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.901953 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-mc5wq"]
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.902105 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/47d42067-0194-4d5d-8cc8-a49e9065bc9b-client-ca\") pod \"route-controller-manager-6576b87f9c-c9hn7\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7"
Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.917197 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/70a9137f-7cb9-429a-b0fb-76f7184e7936-etcd-serving-ca\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97"
pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.917728 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.918027 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.918381 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.918944 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.919108 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/70a9137f-7cb9-429a-b0fb-76f7184e7936-node-pullsecrets\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.920171 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/47d42067-0194-4d5d-8cc8-a49e9065bc9b-serving-cert\") pod \"route-controller-manager-6576b87f9c-c9hn7\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.924135 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47d42067-0194-4d5d-8cc8-a49e9065bc9b-config\") pod \"route-controller-manager-6576b87f9c-c9hn7\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.931920 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.932203 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.932308 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-client-ca\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.933772 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.938506 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5388d463-7ff7-4465-ab69-3d0015d91232-serving-cert\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.941326 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/70a9137f-7cb9-429a-b0fb-76f7184e7936-encryption-config\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.945032 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/70a9137f-7cb9-429a-b0fb-76f7184e7936-etcd-client\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.963351 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.963771 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.963868 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.964097 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.964174 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-rq5tj"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.964611 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.965054 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.965491 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.965679 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.965681 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-fsdht"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.965765 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.965940 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-rq5tj" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.966072 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.967693 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-t5hkb"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.967766 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-g4spg"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.968727 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-g4spg" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971476 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/968a156c-de1c-4d13-bfad-6596916711d5-audit-policies\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971504 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nstfc\" (UniqueName: \"kubernetes.io/projected/7b8abd82-5187-4029-82aa-f0d5495ce298-kube-api-access-nstfc\") pod \"cluster-samples-operator-665b6dd947-wlpxt\" (UID: \"7b8abd82-5187-4029-82aa-f0d5495ce298\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971530 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43690e91-5b82-4acb-972e-24159036039f-config\") pod \"kube-controller-manager-operator-78b949d7b-kf8n8\" (UID: \"43690e91-5b82-4acb-972e-24159036039f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971548 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971565 4814 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7s58f\" (UniqueName: \"kubernetes.io/projected/44b904d6-2898-43bb-a072-54661fe953cd-kube-api-access-7s58f\") pod \"package-server-manager-789f6589d5-dh628\" (UID: \"44b904d6-2898-43bb-a072-54661fe953cd\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971584 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6e1cb8a-27b0-4267-a54c-6858de4a1e1b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-c2wdb\" (UID: \"b6e1cb8a-27b0-4267-a54c-6858de4a1e1b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971608 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1a79fa9b-7c1f-487c-96f0-bda318f2180d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-cdhwb\" (UID: \"1a79fa9b-7c1f-487c-96f0-bda318f2180d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971638 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f5552558-72a7-40d0-a265-450ce55c22ad-apiservice-cert\") pod \"packageserver-d55dfcdfc-n6gln\" (UID: \"f5552558-72a7-40d0-a265-450ce55c22ad\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971663 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/968a156c-de1c-4d13-bfad-6596916711d5-etcd-client\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971679 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f6117233-bf75-4d0a-b930-099b1021a9ac-config\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971696 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xjps\" (UniqueName: \"kubernetes.io/projected/698622fa-b0b2-4099-995f-9c723376c176-kube-api-access-6xjps\") pod \"console-operator-58897d9998-kst8c\" (UID: \"698622fa-b0b2-4099-995f-9c723376c176\") " pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971711 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 
05:20:50.971725 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f5552558-72a7-40d0-a265-450ce55c22ad-webhook-cert\") pod \"packageserver-d55dfcdfc-n6gln\" (UID: \"f5552558-72a7-40d0-a265-450ce55c22ad\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971742 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74dhz\" (UniqueName: \"kubernetes.io/projected/f6117233-bf75-4d0a-b930-099b1021a9ac-kube-api-access-74dhz\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971757 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d1fb380d-478d-4925-a8f3-bfe8ac8a40d6-metrics-tls\") pod \"dns-operator-744455d44c-b86l9\" (UID: \"d1fb380d-478d-4925-a8f3-bfe8ac8a40d6\") " pod="openshift-dns-operator/dns-operator-744455d44c-b86l9" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971773 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dda1c739-e778-4794-b1a3-cf1db49fd7df-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-5mqhp\" (UID: \"dda1c739-e778-4794-b1a3-cf1db49fd7df\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971788 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brgtj\" (UniqueName: \"kubernetes.io/projected/1a79fa9b-7c1f-487c-96f0-bda318f2180d-kube-api-access-brgtj\") pod \"cluster-image-registry-operator-dc59b4c8b-cdhwb\" (UID: \"1a79fa9b-7c1f-487c-96f0-bda318f2180d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971804 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m68h9\" (UniqueName: \"kubernetes.io/projected/d1fb380d-478d-4925-a8f3-bfe8ac8a40d6-kube-api-access-m68h9\") pod \"dns-operator-744455d44c-b86l9\" (UID: \"d1fb380d-478d-4925-a8f3-bfe8ac8a40d6\") " pod="openshift-dns-operator/dns-operator-744455d44c-b86l9" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971818 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvz56\" (UniqueName: \"kubernetes.io/projected/f5552558-72a7-40d0-a265-450ce55c22ad-kube-api-access-wvz56\") pod \"packageserver-d55dfcdfc-n6gln\" (UID: \"f5552558-72a7-40d0-a265-450ce55c22ad\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971834 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/7b8abd82-5187-4029-82aa-f0d5495ce298-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-wlpxt\" (UID: \"7b8abd82-5187-4029-82aa-f0d5495ce298\") " 
pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971862 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3e80fbf9-0347-4103-b749-f3a0a9f5e485-bound-sa-token\") pod \"ingress-operator-5b745b69d9-7tzlt\" (UID: \"3e80fbf9-0347-4103-b749-f3a0a9f5e485\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971876 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971892 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-trusted-ca-bundle\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971907 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1980267a-5bfc-40b9-abe6-ae0e9774910c-auth-proxy-config\") pod \"machine-approver-56656f9798-zwdxr\" (UID: \"1980267a-5bfc-40b9-abe6-ae0e9774910c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971924 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-config\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971948 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1980267a-5bfc-40b9-abe6-ae0e9774910c-machine-approver-tls\") pod \"machine-approver-56656f9798-zwdxr\" (UID: \"1980267a-5bfc-40b9-abe6-ae0e9774910c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971964 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.971979 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: 
\"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972004 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43690e91-5b82-4acb-972e-24159036039f-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-kf8n8\" (UID: \"43690e91-5b82-4acb-972e-24159036039f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972020 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972039 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dda1c739-e778-4794-b1a3-cf1db49fd7df-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-5mqhp\" (UID: \"dda1c739-e778-4794-b1a3-cf1db49fd7df\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972056 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/968a156c-de1c-4d13-bfad-6596916711d5-audit-dir\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972071 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/698622fa-b0b2-4099-995f-9c723376c176-config\") pod \"console-operator-58897d9998-kst8c\" (UID: \"698622fa-b0b2-4099-995f-9c723376c176\") " pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.966616 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/70a9137f-7cb9-429a-b0fb-76f7184e7936-serving-cert\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972161 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2gq6\" (UniqueName: \"kubernetes.io/projected/b6e1cb8a-27b0-4267-a54c-6858de4a1e1b-kube-api-access-j2gq6\") pod \"openshift-apiserver-operator-796bbdcf4f-c2wdb\" (UID: \"b6e1cb8a-27b0-4267-a54c-6858de4a1e1b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972179 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/968a156c-de1c-4d13-bfad-6596916711d5-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") 
" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972196 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972212 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np8fb\" (UniqueName: \"kubernetes.io/projected/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-kube-api-access-np8fb\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972332 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b75d4696-814c-420a-8283-df0ce39bdca7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-qgs25\" (UID: \"b75d4696-814c-420a-8283-df0ce39bdca7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972371 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972396 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b75d4696-814c-420a-8283-df0ce39bdca7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-qgs25\" (UID: \"b75d4696-814c-420a-8283-df0ce39bdca7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972417 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43690e91-5b82-4acb-972e-24159036039f-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-kf8n8\" (UID: \"43690e91-5b82-4acb-972e-24159036039f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972435 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-serving-cert\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972456 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/f5552558-72a7-40d0-a265-450ce55c22ad-tmpfs\") pod \"packageserver-d55dfcdfc-n6gln\" (UID: 
\"f5552558-72a7-40d0-a265-450ce55c22ad\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972474 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/968a156c-de1c-4d13-bfad-6596916711d5-encryption-config\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972547 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rnpb\" (UniqueName: \"kubernetes.io/projected/63d01f3f-6487-4147-a862-70739c2c7961-kube-api-access-4rnpb\") pod \"downloads-7954f5f757-fsdht\" (UID: \"63d01f3f-6487-4147-a862-70739c2c7961\") " pod="openshift-console/downloads-7954f5f757-fsdht" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972601 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-oauth-config\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972667 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972819 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-oauth-serving-cert\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972854 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/44b904d6-2898-43bb-a072-54661fe953cd-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-dh628\" (UID: \"44b904d6-2898-43bb-a072-54661fe953cd\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972880 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f6117233-bf75-4d0a-b930-099b1021a9ac-serving-cert\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972902 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/968a156c-de1c-4d13-bfad-6596916711d5-serving-cert\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: 
\"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972922 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-audit-policies\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.972939 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z48ms\" (UniqueName: \"kubernetes.io/projected/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-kube-api-access-z48ms\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973031 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1a79fa9b-7c1f-487c-96f0-bda318f2180d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-cdhwb\" (UID: \"1a79fa9b-7c1f-487c-96f0-bda318f2180d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973057 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3e80fbf9-0347-4103-b749-f3a0a9f5e485-metrics-tls\") pod \"ingress-operator-5b745b69d9-7tzlt\" (UID: \"3e80fbf9-0347-4103-b749-f3a0a9f5e485\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973268 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f6117233-bf75-4d0a-b930-099b1021a9ac-service-ca-bundle\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973304 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6e1cb8a-27b0-4267-a54c-6858de4a1e1b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-c2wdb\" (UID: \"b6e1cb8a-27b0-4267-a54c-6858de4a1e1b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973355 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1a79fa9b-7c1f-487c-96f0-bda318f2180d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-cdhwb\" (UID: \"1a79fa9b-7c1f-487c-96f0-bda318f2180d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973387 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/968a156c-de1c-4d13-bfad-6596916711d5-trusted-ca-bundle\") pod 
\"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973455 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwlvt\" (UniqueName: \"kubernetes.io/projected/3e80fbf9-0347-4103-b749-f3a0a9f5e485-kube-api-access-wwlvt\") pod \"ingress-operator-5b745b69d9-7tzlt\" (UID: \"3e80fbf9-0347-4103-b749-f3a0a9f5e485\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973495 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/698622fa-b0b2-4099-995f-9c723376c176-serving-cert\") pod \"console-operator-58897d9998-kst8c\" (UID: \"698622fa-b0b2-4099-995f-9c723376c176\") " pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973520 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1980267a-5bfc-40b9-abe6-ae0e9774910c-config\") pod \"machine-approver-56656f9798-zwdxr\" (UID: \"1980267a-5bfc-40b9-abe6-ae0e9774910c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973560 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvbxz\" (UniqueName: \"kubernetes.io/projected/21498fe0-31d4-40b6-aa3a-c1cf4047c155-kube-api-access-tvbxz\") pod \"migrator-59844c95c7-tmm6k\" (UID: \"21498fe0-31d4-40b6-aa3a-c1cf4047c155\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tmm6k" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973582 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcfhx\" (UniqueName: \"kubernetes.io/projected/dda1c739-e778-4794-b1a3-cf1db49fd7df-kube-api-access-rcfhx\") pod \"openshift-controller-manager-operator-756b6f6bc6-5mqhp\" (UID: \"dda1c739-e778-4794-b1a3-cf1db49fd7df\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973647 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f6117233-bf75-4d0a-b930-099b1021a9ac-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973866 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973913 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/698622fa-b0b2-4099-995f-9c723376c176-trusted-ca\") pod \"console-operator-58897d9998-kst8c\" (UID: \"698622fa-b0b2-4099-995f-9c723376c176\") " pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973948 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.973993 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b75d4696-814c-420a-8283-df0ce39bdca7-config\") pod \"kube-apiserver-operator-766d6c64bb-qgs25\" (UID: \"b75d4696-814c-420a-8283-df0ce39bdca7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.974017 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlm45\" (UniqueName: \"kubernetes.io/projected/1980267a-5bfc-40b9-abe6-ae0e9774910c-kube-api-access-dlm45\") pod \"machine-approver-56656f9798-zwdxr\" (UID: \"1980267a-5bfc-40b9-abe6-ae0e9774910c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.974037 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65xld\" (UniqueName: \"kubernetes.io/projected/968a156c-de1c-4d13-bfad-6596916711d5-kube-api-access-65xld\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.974080 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3e80fbf9-0347-4103-b749-f3a0a9f5e485-trusted-ca\") pod \"ingress-operator-5b745b69d9-7tzlt\" (UID: \"3e80fbf9-0347-4103-b749-f3a0a9f5e485\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.974096 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-audit-dir\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.974111 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-service-ca\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.974523 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.974567 4814 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.975266 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-2w4zq"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.976464 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2w4zq" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.976515 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.980484 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-mhktc"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.982185 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ggbzf"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.986501 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-frrqc"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.994243 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-bmlds"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.995708 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-b86l9"] Jan 22 05:20:50 crc kubenswrapper[4814]: I0122 05:20:50.997104 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.000732 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.003170 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.005329 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.006343 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.008357 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-sshbr"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.009701 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-h7dw7"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.013139 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-jnnrg"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.014571 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.015941 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-kst8c"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.018938 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.023181 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.025519 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.026258 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.027632 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.028644 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.029677 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.032065 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-tmm6k"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.033308 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.033857 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.034322 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-mc5wq"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.035349 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.036419 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-g9h6j"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.037615 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-5xkqn"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.038467 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.038846 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.040064 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.041509 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.042873 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-4spp2"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.043436 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-4spp2" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.044037 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-rq5tj"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.048343 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-g4spg"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.048394 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kt2c2"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.056326 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.057650 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-2w4zq"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.058724 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-5xkqn"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.059973 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.075425 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.075426 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f6117233-bf75-4d0a-b930-099b1021a9ac-service-ca-bundle\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.075524 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6e1cb8a-27b0-4267-a54c-6858de4a1e1b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-c2wdb\" (UID: \"b6e1cb8a-27b0-4267-a54c-6858de4a1e1b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.075542 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1a79fa9b-7c1f-487c-96f0-bda318f2180d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-cdhwb\" (UID: \"1a79fa9b-7c1f-487c-96f0-bda318f2180d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.075561 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/968a156c-de1c-4d13-bfad-6596916711d5-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.075579 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwlvt\" (UniqueName: \"kubernetes.io/projected/3e80fbf9-0347-4103-b749-f3a0a9f5e485-kube-api-access-wwlvt\") pod \"ingress-operator-5b745b69d9-7tzlt\" (UID: \"3e80fbf9-0347-4103-b749-f3a0a9f5e485\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.075855 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/698622fa-b0b2-4099-995f-9c723376c176-serving-cert\") pod \"console-operator-58897d9998-kst8c\" (UID: \"698622fa-b0b2-4099-995f-9c723376c176\") " pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.075909 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1980267a-5bfc-40b9-abe6-ae0e9774910c-config\") pod \"machine-approver-56656f9798-zwdxr\" (UID: \"1980267a-5bfc-40b9-abe6-ae0e9774910c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076267 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvbxz\" (UniqueName: \"kubernetes.io/projected/21498fe0-31d4-40b6-aa3a-c1cf4047c155-kube-api-access-tvbxz\") pod \"migrator-59844c95c7-tmm6k\" (UID: \"21498fe0-31d4-40b6-aa3a-c1cf4047c155\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tmm6k" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076294 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcfhx\" (UniqueName: \"kubernetes.io/projected/dda1c739-e778-4794-b1a3-cf1db49fd7df-kube-api-access-rcfhx\") pod \"openshift-controller-manager-operator-756b6f6bc6-5mqhp\" (UID: \"dda1c739-e778-4794-b1a3-cf1db49fd7df\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076343 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f6117233-bf75-4d0a-b930-099b1021a9ac-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076361 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076376 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076392 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/698622fa-b0b2-4099-995f-9c723376c176-trusted-ca\") pod \"console-operator-58897d9998-kst8c\" (UID: \"698622fa-b0b2-4099-995f-9c723376c176\") " pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076435 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b75d4696-814c-420a-8283-df0ce39bdca7-config\") pod \"kube-apiserver-operator-766d6c64bb-qgs25\" (UID: \"b75d4696-814c-420a-8283-df0ce39bdca7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076453 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlm45\" (UniqueName: \"kubernetes.io/projected/1980267a-5bfc-40b9-abe6-ae0e9774910c-kube-api-access-dlm45\") pod \"machine-approver-56656f9798-zwdxr\" (UID: \"1980267a-5bfc-40b9-abe6-ae0e9774910c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076494 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65xld\" (UniqueName: \"kubernetes.io/projected/968a156c-de1c-4d13-bfad-6596916711d5-kube-api-access-65xld\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076512 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3e80fbf9-0347-4103-b749-f3a0a9f5e485-trusted-ca\") pod \"ingress-operator-5b745b69d9-7tzlt\" (UID: \"3e80fbf9-0347-4103-b749-f3a0a9f5e485\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076530 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-audit-dir\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076547 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-service-ca\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" 
Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076582 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/968a156c-de1c-4d13-bfad-6596916711d5-audit-policies\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076599 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nstfc\" (UniqueName: \"kubernetes.io/projected/7b8abd82-5187-4029-82aa-f0d5495ce298-kube-api-access-nstfc\") pod \"cluster-samples-operator-665b6dd947-wlpxt\" (UID: \"7b8abd82-5187-4029-82aa-f0d5495ce298\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076615 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43690e91-5b82-4acb-972e-24159036039f-config\") pod \"kube-controller-manager-operator-78b949d7b-kf8n8\" (UID: \"43690e91-5b82-4acb-972e-24159036039f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076658 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076678 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7s58f\" (UniqueName: \"kubernetes.io/projected/44b904d6-2898-43bb-a072-54661fe953cd-kube-api-access-7s58f\") pod \"package-server-manager-789f6589d5-dh628\" (UID: \"44b904d6-2898-43bb-a072-54661fe953cd\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076705 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6e1cb8a-27b0-4267-a54c-6858de4a1e1b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-c2wdb\" (UID: \"b6e1cb8a-27b0-4267-a54c-6858de4a1e1b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.075917 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f6117233-bf75-4d0a-b930-099b1021a9ac-service-ca-bundle\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076742 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1a79fa9b-7c1f-487c-96f0-bda318f2180d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-cdhwb\" (UID: \"1a79fa9b-7c1f-487c-96f0-bda318f2180d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 
05:20:51.076887 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f5552558-72a7-40d0-a265-450ce55c22ad-apiservice-cert\") pod \"packageserver-d55dfcdfc-n6gln\" (UID: \"f5552558-72a7-40d0-a265-450ce55c22ad\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076917 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/968a156c-de1c-4d13-bfad-6596916711d5-etcd-client\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076940 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f6117233-bf75-4d0a-b930-099b1021a9ac-config\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076967 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xjps\" (UniqueName: \"kubernetes.io/projected/698622fa-b0b2-4099-995f-9c723376c176-kube-api-access-6xjps\") pod \"console-operator-58897d9998-kst8c\" (UID: \"698622fa-b0b2-4099-995f-9c723376c176\") " pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076991 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-audit-dir\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077018 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077043 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f5552558-72a7-40d0-a265-450ce55c22ad-webhook-cert\") pod \"packageserver-d55dfcdfc-n6gln\" (UID: \"f5552558-72a7-40d0-a265-450ce55c22ad\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077070 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74dhz\" (UniqueName: \"kubernetes.io/projected/f6117233-bf75-4d0a-b930-099b1021a9ac-kube-api-access-74dhz\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077093 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/d1fb380d-478d-4925-a8f3-bfe8ac8a40d6-metrics-tls\") pod \"dns-operator-744455d44c-b86l9\" (UID: \"d1fb380d-478d-4925-a8f3-bfe8ac8a40d6\") " pod="openshift-dns-operator/dns-operator-744455d44c-b86l9" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077097 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6e1cb8a-27b0-4267-a54c-6858de4a1e1b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-c2wdb\" (UID: \"b6e1cb8a-27b0-4267-a54c-6858de4a1e1b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077117 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dda1c739-e778-4794-b1a3-cf1db49fd7df-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-5mqhp\" (UID: \"dda1c739-e778-4794-b1a3-cf1db49fd7df\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077141 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brgtj\" (UniqueName: \"kubernetes.io/projected/1a79fa9b-7c1f-487c-96f0-bda318f2180d-kube-api-access-brgtj\") pod \"cluster-image-registry-operator-dc59b4c8b-cdhwb\" (UID: \"1a79fa9b-7c1f-487c-96f0-bda318f2180d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077165 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m68h9\" (UniqueName: \"kubernetes.io/projected/d1fb380d-478d-4925-a8f3-bfe8ac8a40d6-kube-api-access-m68h9\") pod \"dns-operator-744455d44c-b86l9\" (UID: \"d1fb380d-478d-4925-a8f3-bfe8ac8a40d6\") " pod="openshift-dns-operator/dns-operator-744455d44c-b86l9" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077189 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvz56\" (UniqueName: \"kubernetes.io/projected/f5552558-72a7-40d0-a265-450ce55c22ad-kube-api-access-wvz56\") pod \"packageserver-d55dfcdfc-n6gln\" (UID: \"f5552558-72a7-40d0-a265-450ce55c22ad\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077217 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/7b8abd82-5187-4029-82aa-f0d5495ce298-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-wlpxt\" (UID: \"7b8abd82-5187-4029-82aa-f0d5495ce298\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077240 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3e80fbf9-0347-4103-b749-f3a0a9f5e485-bound-sa-token\") pod \"ingress-operator-5b745b69d9-7tzlt\" (UID: \"3e80fbf9-0347-4103-b749-f3a0a9f5e485\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077264 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077289 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-trusted-ca-bundle\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076189 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/968a156c-de1c-4d13-bfad-6596916711d5-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077369 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1980267a-5bfc-40b9-abe6-ae0e9774910c-auth-proxy-config\") pod \"machine-approver-56656f9798-zwdxr\" (UID: \"1980267a-5bfc-40b9-abe6-ae0e9774910c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077398 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-config\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077422 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1980267a-5bfc-40b9-abe6-ae0e9774910c-machine-approver-tls\") pod \"machine-approver-56656f9798-zwdxr\" (UID: \"1980267a-5bfc-40b9-abe6-ae0e9774910c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077446 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077469 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077495 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43690e91-5b82-4acb-972e-24159036039f-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-kf8n8\" (UID: \"43690e91-5b82-4acb-972e-24159036039f\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077518 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077541 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dda1c739-e778-4794-b1a3-cf1db49fd7df-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-5mqhp\" (UID: \"dda1c739-e778-4794-b1a3-cf1db49fd7df\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077564 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/968a156c-de1c-4d13-bfad-6596916711d5-audit-dir\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077607 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/698622fa-b0b2-4099-995f-9c723376c176-config\") pod \"console-operator-58897d9998-kst8c\" (UID: \"698622fa-b0b2-4099-995f-9c723376c176\") " pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077653 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2gq6\" (UniqueName: \"kubernetes.io/projected/b6e1cb8a-27b0-4267-a54c-6858de4a1e1b-kube-api-access-j2gq6\") pod \"openshift-apiserver-operator-796bbdcf4f-c2wdb\" (UID: \"b6e1cb8a-27b0-4267-a54c-6858de4a1e1b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077681 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/968a156c-de1c-4d13-bfad-6596916711d5-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077708 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079344 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-np8fb\" (UniqueName: \"kubernetes.io/projected/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-kube-api-access-np8fb\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 
05:20:51.079389 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079419 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b75d4696-814c-420a-8283-df0ce39bdca7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-qgs25\" (UID: \"b75d4696-814c-420a-8283-df0ce39bdca7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079438 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b75d4696-814c-420a-8283-df0ce39bdca7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-qgs25\" (UID: \"b75d4696-814c-420a-8283-df0ce39bdca7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079458 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/968a156c-de1c-4d13-bfad-6596916711d5-encryption-config\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079474 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43690e91-5b82-4acb-972e-24159036039f-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-kf8n8\" (UID: \"43690e91-5b82-4acb-972e-24159036039f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079490 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-serving-cert\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079506 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/f5552558-72a7-40d0-a265-450ce55c22ad-tmpfs\") pod \"packageserver-d55dfcdfc-n6gln\" (UID: \"f5552558-72a7-40d0-a265-450ce55c22ad\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079531 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rnpb\" (UniqueName: \"kubernetes.io/projected/63d01f3f-6487-4147-a862-70739c2c7961-kube-api-access-4rnpb\") pod \"downloads-7954f5f757-fsdht\" (UID: \"63d01f3f-6487-4147-a862-70739c2c7961\") " pod="openshift-console/downloads-7954f5f757-fsdht" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079546 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-oauth-config\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079562 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/968a156c-de1c-4d13-bfad-6596916711d5-serving-cert\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079578 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079595 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-oauth-serving-cert\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079613 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/44b904d6-2898-43bb-a072-54661fe953cd-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-dh628\" (UID: \"44b904d6-2898-43bb-a072-54661fe953cd\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079642 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f6117233-bf75-4d0a-b930-099b1021a9ac-serving-cert\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079661 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-audit-policies\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079678 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z48ms\" (UniqueName: \"kubernetes.io/projected/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-kube-api-access-z48ms\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079710 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1a79fa9b-7c1f-487c-96f0-bda318f2180d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-cdhwb\" (UID: 
\"1a79fa9b-7c1f-487c-96f0-bda318f2180d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.079726 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3e80fbf9-0347-4103-b749-f3a0a9f5e485-metrics-tls\") pod \"ingress-operator-5b745b69d9-7tzlt\" (UID: \"3e80fbf9-0347-4103-b749-f3a0a9f5e485\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077905 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f6117233-bf75-4d0a-b930-099b1021a9ac-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.078048 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/968a156c-de1c-4d13-bfad-6596916711d5-audit-dir\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.080843 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.081300 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.081393 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.078084 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/968a156c-de1c-4d13-bfad-6596916711d5-audit-policies\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.078420 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f6117233-bf75-4d0a-b930-099b1021a9ac-config\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.076675 4814 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1980267a-5bfc-40b9-abe6-ae0e9774910c-config\") pod \"machine-approver-56656f9798-zwdxr\" (UID: \"1980267a-5bfc-40b9-abe6-ae0e9774910c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.081637 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/698622fa-b0b2-4099-995f-9c723376c176-trusted-ca\") pod \"console-operator-58897d9998-kst8c\" (UID: \"698622fa-b0b2-4099-995f-9c723376c176\") " pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.082208 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/698622fa-b0b2-4099-995f-9c723376c176-config\") pod \"console-operator-58897d9998-kst8c\" (UID: \"698622fa-b0b2-4099-995f-9c723376c176\") " pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.082257 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/698622fa-b0b2-4099-995f-9c723376c176-serving-cert\") pod \"console-operator-58897d9998-kst8c\" (UID: \"698622fa-b0b2-4099-995f-9c723376c176\") " pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.082406 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/968a156c-de1c-4d13-bfad-6596916711d5-etcd-client\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.082964 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-config\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.083061 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-trusted-ca-bundle\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.083388 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dda1c739-e778-4794-b1a3-cf1db49fd7df-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-5mqhp\" (UID: \"dda1c739-e778-4794-b1a3-cf1db49fd7df\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.083426 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1980267a-5bfc-40b9-abe6-ae0e9774910c-auth-proxy-config\") pod \"machine-approver-56656f9798-zwdxr\" (UID: \"1980267a-5bfc-40b9-abe6-ae0e9774910c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" Jan 22 
05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.077655 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-service-ca\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.083753 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/f5552558-72a7-40d0-a265-450ce55c22ad-tmpfs\") pod \"packageserver-d55dfcdfc-n6gln\" (UID: \"f5552558-72a7-40d0-a265-450ce55c22ad\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.084075 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/968a156c-de1c-4d13-bfad-6596916711d5-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.084647 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.084655 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-oauth-serving-cert\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.084707 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-audit-policies\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.085793 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dda1c739-e778-4794-b1a3-cf1db49fd7df-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-5mqhp\" (UID: \"dda1c739-e778-4794-b1a3-cf1db49fd7df\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.086269 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d1fb380d-478d-4925-a8f3-bfe8ac8a40d6-metrics-tls\") pod \"dns-operator-744455d44c-b86l9\" (UID: \"d1fb380d-478d-4925-a8f3-bfe8ac8a40d6\") " pod="openshift-dns-operator/dns-operator-744455d44c-b86l9" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.086703 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/1a79fa9b-7c1f-487c-96f0-bda318f2180d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-cdhwb\" (UID: 
\"1a79fa9b-7c1f-487c-96f0-bda318f2180d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.086824 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/7b8abd82-5187-4029-82aa-f0d5495ce298-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-wlpxt\" (UID: \"7b8abd82-5187-4029-82aa-f0d5495ce298\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.086968 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/968a156c-de1c-4d13-bfad-6596916711d5-encryption-config\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.087099 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.087111 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.087438 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/1980267a-5bfc-40b9-abe6-ae0e9774910c-machine-approver-tls\") pod \"machine-approver-56656f9798-zwdxr\" (UID: \"1980267a-5bfc-40b9-abe6-ae0e9774910c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.087542 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.087864 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.088107 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.088819 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6e1cb8a-27b0-4267-a54c-6858de4a1e1b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-c2wdb\" (UID: \"b6e1cb8a-27b0-4267-a54c-6858de4a1e1b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.089156 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-oauth-config\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.089861 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f6117233-bf75-4d0a-b930-099b1021a9ac-serving-cert\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.089933 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/968a156c-de1c-4d13-bfad-6596916711d5-serving-cert\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.089982 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.091124 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.092376 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-serving-cert\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.094851 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.114033 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.125066 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/b75d4696-814c-420a-8283-df0ce39bdca7-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-qgs25\" (UID: \"b75d4696-814c-420a-8283-df0ce39bdca7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.134701 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.138493 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b75d4696-814c-420a-8283-df0ce39bdca7-config\") pod \"kube-apiserver-operator-766d6c64bb-qgs25\" (UID: \"b75d4696-814c-420a-8283-df0ce39bdca7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.165883 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.168561 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1a79fa9b-7c1f-487c-96f0-bda318f2180d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-cdhwb\" (UID: \"1a79fa9b-7c1f-487c-96f0-bda318f2180d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.174978 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.193853 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.214499 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.234498 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.255338 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.274641 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.294737 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.314536 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.335120 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.354074 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.364129 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/3e80fbf9-0347-4103-b749-f3a0a9f5e485-metrics-tls\") pod \"ingress-operator-5b745b69d9-7tzlt\" (UID: \"3e80fbf9-0347-4103-b749-f3a0a9f5e485\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.381317 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.388800 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3e80fbf9-0347-4103-b749-f3a0a9f5e485-trusted-ca\") pod \"ingress-operator-5b745b69d9-7tzlt\" (UID: \"3e80fbf9-0347-4103-b749-f3a0a9f5e485\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.394578 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.414997 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.435592 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.456435 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.503143 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqv8k\" (UniqueName: \"kubernetes.io/projected/70a9137f-7cb9-429a-b0fb-76f7184e7936-kube-api-access-lqv8k\") pod \"apiserver-76f77b778f-hvr97\" (UID: \"70a9137f-7cb9-429a-b0fb-76f7184e7936\") " pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.525567 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kgsm\" (UniqueName: \"kubernetes.io/projected/47d42067-0194-4d5d-8cc8-a49e9065bc9b-kube-api-access-5kgsm\") pod \"route-controller-manager-6576b87f9c-c9hn7\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.535813 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.555520 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.576813 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.595898 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.615001 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 
05:20:51.635265 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.642010 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f5552558-72a7-40d0-a265-450ce55c22ad-webhook-cert\") pod \"packageserver-d55dfcdfc-n6gln\" (UID: \"f5552558-72a7-40d0-a265-450ce55c22ad\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.642514 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f5552558-72a7-40d0-a265-450ce55c22ad-apiservice-cert\") pod \"packageserver-d55dfcdfc-n6gln\" (UID: \"f5552558-72a7-40d0-a265-450ce55c22ad\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.655826 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.675009 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.682850 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.693348 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/43690e91-5b82-4acb-972e-24159036039f-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-kf8n8\" (UID: \"43690e91-5b82-4acb-972e-24159036039f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.695667 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.697847 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-hvr97" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.700102 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43690e91-5b82-4acb-972e-24159036039f-config\") pod \"kube-controller-manager-operator-78b949d7b-kf8n8\" (UID: \"43690e91-5b82-4acb-972e-24159036039f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.718240 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.755190 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.758077 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxcw5\" (UniqueName: \"kubernetes.io/projected/5388d463-7ff7-4465-ab69-3d0015d91232-kube-api-access-mxcw5\") pod \"controller-manager-879f6c89f-ggbzf\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.775808 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.794702 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.814300 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.835856 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.855015 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.874792 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.894424 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.922486 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.935461 4814 request.go:700] Waited for 1.016642736s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca-operator/secrets?fieldSelector=metadata.name%3Dserving-cert&limit=500&resourceVersion=0 Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.936251 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.937226 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 22 05:20:51 crc kubenswrapper[4814]: W0122 05:20:51.945335 4814 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47d42067_0194_4d5d_8cc8_a49e9065bc9b.slice/crio-653f4c843b776e66c52ddd36b71975c110ff391a9b2aa42ab7844d924008604c WatchSource:0}: Error finding container 653f4c843b776e66c52ddd36b71975c110ff391a9b2aa42ab7844d924008604c: Status 404 returned error can't find the container with id 653f4c843b776e66c52ddd36b71975c110ff391a9b2aa42ab7844d924008604c Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.954028 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.971266 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-hvr97"] Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.974702 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 22 05:20:51 crc kubenswrapper[4814]: W0122 05:20:51.980949 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70a9137f_7cb9_429a_b0fb_76f7184e7936.slice/crio-b3961240faa34ad71569d4c4ea1ac7ac8f3bc31b5a7a7319a8c6a422f9a3916b WatchSource:0}: Error finding container b3961240faa34ad71569d4c4ea1ac7ac8f3bc31b5a7a7319a8c6a422f9a3916b: Status 404 returned error can't find the container with id b3961240faa34ad71569d4c4ea1ac7ac8f3bc31b5a7a7319a8c6a422f9a3916b Jan 22 05:20:51 crc kubenswrapper[4814]: I0122 05:20:51.994139 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.010743 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.015225 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.035314 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.055285 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.077898 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 22 05:20:52 crc kubenswrapper[4814]: E0122 05:20:52.085195 4814 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 22 05:20:52 crc kubenswrapper[4814]: E0122 05:20:52.085274 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/44b904d6-2898-43bb-a072-54661fe953cd-package-server-manager-serving-cert podName:44b904d6-2898-43bb-a072-54661fe953cd nodeName:}" failed. No retries permitted until 2026-01-22 05:20:52.585253977 +0000 UTC m=+138.668742212 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/44b904d6-2898-43bb-a072-54661fe953cd-package-server-manager-serving-cert") pod "package-server-manager-789f6589d5-dh628" (UID: "44b904d6-2898-43bb-a072-54661fe953cd") : failed to sync secret cache: timed out waiting for the condition Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.094465 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.120490 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.124175 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hvr97" event={"ID":"70a9137f-7cb9-429a-b0fb-76f7184e7936","Type":"ContainerStarted","Data":"b3961240faa34ad71569d4c4ea1ac7ac8f3bc31b5a7a7319a8c6a422f9a3916b"} Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.125158 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" event={"ID":"47d42067-0194-4d5d-8cc8-a49e9065bc9b","Type":"ContainerStarted","Data":"653f4c843b776e66c52ddd36b71975c110ff391a9b2aa42ab7844d924008604c"} Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.134531 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.154689 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.157151 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ggbzf"] Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.174973 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.195351 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.214652 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.234021 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.255233 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.274868 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.295016 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.315549 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.335334 4814 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.375795 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.395366 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.414896 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.437376 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.455162 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.475068 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.494406 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.515517 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.535114 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.555753 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.575117 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.605390 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/44b904d6-2898-43bb-a072-54661fe953cd-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-dh628\" (UID: \"44b904d6-2898-43bb-a072-54661fe953cd\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.608823 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.615550 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.617662 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/44b904d6-2898-43bb-a072-54661fe953cd-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-dh628\" (UID: \"44b904d6-2898-43bb-a072-54661fe953cd\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628" Jan 22 05:20:52 
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.635897 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.654540 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.675570 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.695187 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.715274 4814 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.735095 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.755187 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.776268 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.795416 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.815540 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.862049 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwlvt\" (UniqueName: \"kubernetes.io/projected/3e80fbf9-0347-4103-b749-f3a0a9f5e485-kube-api-access-wwlvt\") pod \"ingress-operator-5b745b69d9-7tzlt\" (UID: \"3e80fbf9-0347-4103-b749-f3a0a9f5e485\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.884306 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlm45\" (UniqueName: \"kubernetes.io/projected/1980267a-5bfc-40b9-abe6-ae0e9774910c-kube-api-access-dlm45\") pod \"machine-approver-56656f9798-zwdxr\" (UID: \"1980267a-5bfc-40b9-abe6-ae0e9774910c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.906111 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65xld\" (UniqueName: \"kubernetes.io/projected/968a156c-de1c-4d13-bfad-6596916711d5-kube-api-access-65xld\") pod \"apiserver-7bbb656c7d-xmp5c\" (UID: \"968a156c-de1c-4d13-bfad-6596916711d5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.929683 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvbxz\" (UniqueName: \"kubernetes.io/projected/21498fe0-31d4-40b6-aa3a-c1cf4047c155-kube-api-access-tvbxz\") pod \"migrator-59844c95c7-tmm6k\" (UID: \"21498fe0-31d4-40b6-aa3a-c1cf4047c155\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tmm6k"
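[editor's note] Each "kube-api-access-*" volume mounted above is a projected volume that bundles a bound service-account token with the cluster CA. A sketch of how such a volume is declared with the corev1 types; the volume name and the 3607-second expiry are illustrative (real instances carry a random suffix such as kube-api-access-wwlvt):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	expiry := int64(3607) // token lifetime; treat this value as an assumption
	vol := corev1.Volume{
		Name: "kube-api-access-example",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					// A short-lived, audience-bound service account token.
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						Path:              "token",
						ExpirationSeconds: &expiry,
					}},
					// The cluster CA bundle so workloads can verify the apiserver.
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
					}},
				},
			},
		},
	}
	fmt.Println("projected volume:", vol.Name)
}
```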
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.944021 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcfhx\" (UniqueName: \"kubernetes.io/projected/dda1c739-e778-4794-b1a3-cf1db49fd7df-kube-api-access-rcfhx\") pod \"openshift-controller-manager-operator-756b6f6bc6-5mqhp\" (UID: \"dda1c739-e778-4794-b1a3-cf1db49fd7df\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.953566 4814 request.go:700] Waited for 1.875391416s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-cluster-samples-operator/serviceaccounts/cluster-samples-operator/token
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.967150 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m68h9\" (UniqueName: \"kubernetes.io/projected/d1fb380d-478d-4925-a8f3-bfe8ac8a40d6-kube-api-access-m68h9\") pod \"dns-operator-744455d44c-b86l9\" (UID: \"d1fb380d-478d-4925-a8f3-bfe8ac8a40d6\") " pod="openshift-dns-operator/dns-operator-744455d44c-b86l9"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.974456 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nstfc\" (UniqueName: \"kubernetes.io/projected/7b8abd82-5187-4029-82aa-f0d5495ce298-kube-api-access-nstfc\") pod \"cluster-samples-operator-665b6dd947-wlpxt\" (UID: \"7b8abd82-5187-4029-82aa-f0d5495ce298\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt"
Jan 22 05:20:52 crc kubenswrapper[4814]: I0122 05:20:52.988165 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr"
Jan 22 05:20:53 crc kubenswrapper[4814]: W0122 05:20:53.003230 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1980267a_5bfc_40b9_abe6_ae0e9774910c.slice/crio-ed381c69e1ab61ad60fee5918972ffa5dbff4810a49f2b5e62a32644e7367928 WatchSource:0}: Error finding container ed381c69e1ab61ad60fee5918972ffa5dbff4810a49f2b5e62a32644e7367928: Status 404 returned error can't find the container with id ed381c69e1ab61ad60fee5918972ffa5dbff4810a49f2b5e62a32644e7367928
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.003376 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74dhz\" (UniqueName: \"kubernetes.io/projected/f6117233-bf75-4d0a-b930-099b1021a9ac-kube-api-access-74dhz\") pod \"authentication-operator-69f744f599-h7dw7\" (UID: \"f6117233-bf75-4d0a-b930-099b1021a9ac\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.020505 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7s58f\" (UniqueName: \"kubernetes.io/projected/44b904d6-2898-43bb-a072-54661fe953cd-kube-api-access-7s58f\") pod \"package-server-manager-789f6589d5-dh628\" (UID: \"44b904d6-2898-43bb-a072-54661fe953cd\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628"
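[editor's note] The request.go:700 line above is client-go's client-side rate limiter delaying a token request during this mount burst: the kubelet's own token bucket, not apiserver priority-and-fairness, imposed the 1.87s wait. A sketch of where that limit lives; the QPS/Burst values are illustrative, not recommendations, and the host is taken from the log:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	cfg := &rest.Config{Host: "https://api-int.crc.testing:6443"} // credentials omitted
	cfg.QPS = 50    // client-go default is 5 requests/second
	cfg.Burst = 100 // client-go default burst is 10

	// Equivalent explicit form: a token bucket refilling at QPS with capacity
	// Burst; requests over budget block, which produces the "Waited for ..."
	// log line seen above.
	cfg.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(cfg.QPS, cfg.Burst)
	fmt.Printf("client limited to %.0f req/s (burst %d)\n", cfg.QPS, cfg.Burst)
}
```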
\"console-operator-58897d9998-kst8c\" (UID: \"698622fa-b0b2-4099-995f-9c723376c176\") " pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.054765 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvz56\" (UniqueName: \"kubernetes.io/projected/f5552558-72a7-40d0-a265-450ce55c22ad-kube-api-access-wvz56\") pod \"packageserver-d55dfcdfc-n6gln\" (UID: \"f5552558-72a7-40d0-a265-450ce55c22ad\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.069152 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.070024 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.073308 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-np8fb\" (UniqueName: \"kubernetes.io/projected/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-kube-api-access-np8fb\") pod \"console-f9d7485db-jnnrg\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.098785 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.099730 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brgtj\" (UniqueName: \"kubernetes.io/projected/1a79fa9b-7c1f-487c-96f0-bda318f2180d-kube-api-access-brgtj\") pod \"cluster-image-registry-operator-dc59b4c8b-cdhwb\" (UID: \"1a79fa9b-7c1f-487c-96f0-bda318f2180d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.109279 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.113180 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3e80fbf9-0347-4103-b749-f3a0a9f5e485-bound-sa-token\") pod \"ingress-operator-5b745b69d9-7tzlt\" (UID: \"3e80fbf9-0347-4103-b749-f3a0a9f5e485\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.113899 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.133750 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.135993 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b75d4696-814c-420a-8283-df0ce39bdca7-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-qgs25\" (UID: \"b75d4696-814c-420a-8283-df0ce39bdca7\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.139760 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-b86l9" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.143182 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" event={"ID":"5388d463-7ff7-4465-ab69-3d0015d91232","Type":"ContainerStarted","Data":"04596947484eb17872d09bb3d8e1fa190df2907c4ca7e03c7d54a5794b3976b4"} Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.143216 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" event={"ID":"5388d463-7ff7-4465-ab69-3d0015d91232","Type":"ContainerStarted","Data":"21765e9e16c3f4eb1fb2fa73249e947d958f9b0addc4c6a43acb35ece115db9f"} Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.143803 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.148878 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2gq6\" (UniqueName: \"kubernetes.io/projected/b6e1cb8a-27b0-4267-a54c-6858de4a1e1b-kube-api-access-j2gq6\") pod \"openshift-apiserver-operator-796bbdcf4f-c2wdb\" (UID: \"b6e1cb8a-27b0-4267-a54c-6858de4a1e1b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.156290 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.156645 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" event={"ID":"1980267a-5bfc-40b9-abe6-ae0e9774910c","Type":"ContainerStarted","Data":"ed381c69e1ab61ad60fee5918972ffa5dbff4810a49f2b5e62a32644e7367928"} Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.158924 4814 generic.go:334] "Generic (PLEG): container finished" podID="70a9137f-7cb9-429a-b0fb-76f7184e7936" containerID="817d3c27e34cdef3c630a8ebdaac4fb813580ba3c52d0cf778485b995ed6f4e2" exitCode=0 Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.159737 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hvr97" event={"ID":"70a9137f-7cb9-429a-b0fb-76f7184e7936","Type":"ContainerDied","Data":"817d3c27e34cdef3c630a8ebdaac4fb813580ba3c52d0cf778485b995ed6f4e2"} Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.162829 4814 util.go:30] "No sandbox for pod can be found. 
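[editor's note] The SyncLoop (PLEG) and probe lines above record container lifecycle edges (started, died with exitCode=0, readiness flips) that the kubelet also publishes into each pod's status. A sketch of reading the same transitions back through the API; the kubeconfig path is assumed and the pod name is taken from the log:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	pod, err := cs.CoreV1().Pods("openshift-controller-manager").Get(
		context.TODO(), "controller-manager-879f6c89f-ggbzf", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, st := range pod.Status.ContainerStatuses {
		switch {
		case st.State.Running != nil:
			fmt.Printf("%s running, ready=%v\n", st.Name, st.Ready) // readiness probe result
		case st.State.Terminated != nil:
			fmt.Printf("%s exited with code %d\n", st.Name, st.State.Terminated.ExitCode)
		}
	}
}
```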
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.162829 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.177460 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" event={"ID":"47d42067-0194-4d5d-8cc8-a49e9065bc9b","Type":"ContainerStarted","Data":"ff2d2b1ee73c07734bb3c7220e53fc5b7ced27f473092576b3b037b163145e9f"}
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.178078 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.181112 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/43690e91-5b82-4acb-972e-24159036039f-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-kf8n8\" (UID: \"43690e91-5b82-4acb-972e-24159036039f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.187469 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.201875 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.203170 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rnpb\" (UniqueName: \"kubernetes.io/projected/63d01f3f-6487-4147-a862-70739c2c7961-kube-api-access-4rnpb\") pod \"downloads-7954f5f757-fsdht\" (UID: \"63d01f3f-6487-4147-a862-70739c2c7961\") " pod="openshift-console/downloads-7954f5f757-fsdht"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.216475 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1a79fa9b-7c1f-487c-96f0-bda318f2180d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-cdhwb\" (UID: \"1a79fa9b-7c1f-487c-96f0-bda318f2180d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.220867 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tmm6k"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.234213 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z48ms\" (UniqueName: \"kubernetes.io/projected/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-kube-api-access-z48ms\") pod \"oauth-openshift-558db77b4-mhktc\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " pod="openshift-authentication/oauth-openshift-558db77b4-mhktc"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.236268 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.246085 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.269979 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.289692 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314568 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/acbaade5-e87d-4186-932a-9329053b6259-etcd-ca\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314620 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1956797e-8b6c-41a4-b467-cc8ba5a34466-config\") pod \"machine-api-operator-5694c8668f-t5hkb\" (UID: \"1956797e-8b6c-41a4-b467-cc8ba5a34466\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314663 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dec48317-4dfd-40c0-a60a-9ef7fdfaee68-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qkcnm\" (UID: \"dec48317-4dfd-40c0-a60a-9ef7fdfaee68\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314699 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhqzc\" (UniqueName: \"kubernetes.io/projected/7c4b5822-ec90-441f-b78f-dbb20d46d483-kube-api-access-zhqzc\") pod \"machine-config-controller-84d6567774-5cczv\" (UID: \"7c4b5822-ec90-441f-b78f-dbb20d46d483\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314714 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/acbaade5-e87d-4186-932a-9329053b6259-etcd-service-ca\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314730 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/29aae90f-3db5-4e31-a13e-35049f8ff2de-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
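[editor's note] Every VerifyControllerAttachedVolume/MountVolume pair above traces back to a volume entry in some pod spec; the reconciler walks the desired-state-of-world and issues one operation per volume. A sketch of two such declarations with the corev1 types; the referenced ConfigMap/Secret names are assumptions for illustration:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	vols := []corev1.Volume{
		{
			// Mirrors the "config" ConfigMap volume seen for etcd-operator above.
			Name: "config",
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{
					LocalObjectReference: corev1.LocalObjectReference{Name: "etcd-operator-config"}, // name assumed
				},
			},
		},
		{
			// Mirrors the "serving-cert" Secret volume seen above.
			Name: "serving-cert",
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{SecretName: "etcd-operator-serving-cert"}, // name assumed
			},
		},
	}
	for _, v := range vols {
		fmt.Println("volume:", v.Name)
	}
}
```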
(UID: \"1956797e-8b6c-41a4-b467-cc8ba5a34466\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314766 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-bound-sa-token\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314780 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dec48317-4dfd-40c0-a60a-9ef7fdfaee68-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qkcnm\" (UID: \"dec48317-4dfd-40c0-a60a-9ef7fdfaee68\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314801 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/acbaade5-e87d-4186-932a-9329053b6259-etcd-client\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314816 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-registry-tls\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314832 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smtmn\" (UniqueName: \"kubernetes.io/projected/acbaade5-e87d-4186-932a-9329053b6259-kube-api-access-smtmn\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314858 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjq9f\" (UniqueName: \"kubernetes.io/projected/1956797e-8b6c-41a4-b467-cc8ba5a34466-kube-api-access-cjq9f\") pod \"machine-api-operator-5694c8668f-t5hkb\" (UID: \"1956797e-8b6c-41a4-b467-cc8ba5a34466\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314890 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/b03e343f-bcd0-45f8-8ce0-962a6deb71db-available-featuregates\") pod \"openshift-config-operator-7777fb866f-frrqc\" (UID: \"b03e343f-bcd0-45f8-8ce0-962a6deb71db\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314906 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acbaade5-e87d-4186-932a-9329053b6259-config\") pod \"etcd-operator-b45778765-bmlds\" (UID: 
\"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314927 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/29aae90f-3db5-4e31-a13e-35049f8ff2de-trusted-ca\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314942 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1956797e-8b6c-41a4-b467-cc8ba5a34466-images\") pod \"machine-api-operator-5694c8668f-t5hkb\" (UID: \"1956797e-8b6c-41a4-b467-cc8ba5a34466\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314956 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/acbaade5-e87d-4186-932a-9329053b6259-serving-cert\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.314978 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/29aae90f-3db5-4e31-a13e-35049f8ff2de-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.315000 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.315020 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7c4b5822-ec90-441f-b78f-dbb20d46d483-proxy-tls\") pod \"machine-config-controller-84d6567774-5cczv\" (UID: \"7c4b5822-ec90-441f-b78f-dbb20d46d483\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.315034 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b03e343f-bcd0-45f8-8ce0-962a6deb71db-serving-cert\") pod \"openshift-config-operator-7777fb866f-frrqc\" (UID: \"b03e343f-bcd0-45f8-8ce0-962a6deb71db\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.315049 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqqnb\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-kube-api-access-pqqnb\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.315064 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckhql\" (UniqueName: \"kubernetes.io/projected/b03e343f-bcd0-45f8-8ce0-962a6deb71db-kube-api-access-ckhql\") pod \"openshift-config-operator-7777fb866f-frrqc\" (UID: \"b03e343f-bcd0-45f8-8ce0-962a6deb71db\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.315079 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7c4b5822-ec90-441f-b78f-dbb20d46d483-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-5cczv\" (UID: \"7c4b5822-ec90-441f-b78f-dbb20d46d483\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.315094 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dec48317-4dfd-40c0-a60a-9ef7fdfaee68-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qkcnm\" (UID: \"dec48317-4dfd-40c0-a60a-9ef7fdfaee68\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.315111 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/29aae90f-3db5-4e31-a13e-35049f8ff2de-registry-certificates\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: E0122 05:20:53.316281 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:53.816264424 +0000 UTC m=+139.899752639 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.379023 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-fsdht" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.385866 4814 util.go:30] "No sandbox for pod can be found. 
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.385866 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416083 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:53 crc kubenswrapper[4814]: E0122 05:20:53.416356 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:53.91633091 +0000 UTC m=+139.999819125 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416393 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ec7a47e8-3b1c-4864-b18e-2884db3ded5b-metrics-tls\") pod \"dns-default-g4spg\" (UID: \"ec7a47e8-3b1c-4864-b18e-2884db3ded5b\") " pod="openshift-dns/dns-default-g4spg"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416444 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-bound-sa-token\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416462 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-registry-tls\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416517 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dec48317-4dfd-40c0-a60a-9ef7fdfaee68-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qkcnm\" (UID: \"dec48317-4dfd-40c0-a60a-9ef7fdfaee68\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm"
Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416541 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/acbaade5-e87d-4186-932a-9329053b6259-etcd-client\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds"
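[editor's note] The "No retries permitted until ... (durationBeforeRetry 500ms)" errors above show the volume manager's retry policy: a failed operation is parked in nestedpendingoperations and retried after a growing delay rather than in a tight loop. A sketch of the same pattern with apimachinery's wait helpers; the constants are illustrative of the observed 500ms, not copied from kubelet source:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	backoff := wait.Backoff{
		Duration: 500 * time.Millisecond, // first durationBeforeRetry
		Factor:   2.0,                    // delay doubles after each failure
		Steps:    5,
	}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		fmt.Printf("attempt %d: driver registered? no, retrying\n", attempts)
		return false, nil // condition not met; wait and try again
	})
	if errors.Is(err, wait.ErrWaitTimeout) {
		fmt.Println("gave up after", attempts, "attempts")
	}
}
```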
(UniqueName: \"kubernetes.io/projected/acbaade5-e87d-4186-932a-9329053b6259-kube-api-access-smtmn\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416640 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0b9f4ced-dcb7-458a-a111-71d67169f45b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-g9h6j\" (UID: \"0b9f4ced-dcb7-458a-a111-71d67169f45b\") " pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416659 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpnbg\" (UniqueName: \"kubernetes.io/projected/06673954-7d33-4d69-93ea-fd64814b74a1-kube-api-access-zpnbg\") pod \"olm-operator-6b444d44fb-db688\" (UID: \"06673954-7d33-4d69-93ea-fd64814b74a1\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416703 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6bcd32d0-1f64-48d5-b9d9-5556573a6927-proxy-tls\") pod \"machine-config-operator-74547568cd-dn9k6\" (UID: \"6bcd32d0-1f64-48d5-b9d9-5556573a6927\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416824 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-mountpoint-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416841 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqzxf\" (UniqueName: \"kubernetes.io/projected/b6998da9-cc02-4da8-b3d4-c02f32318b6f-kube-api-access-tqzxf\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416867 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/c7655574-91ab-46f7-8375-fe5a6eb6b1f4-node-bootstrap-token\") pod \"machine-config-server-4spp2\" (UID: \"c7655574-91ab-46f7-8375-fe5a6eb6b1f4\") " pod="openshift-machine-config-operator/machine-config-server-4spp2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416884 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a31ffca-b39b-4c88-af05-b56eb149e248-metrics-certs\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416915 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqz9q\" (UniqueName: 
\"kubernetes.io/projected/fcca12ae-2952-47fb-b97c-6d913948ae44-kube-api-access-sqz9q\") pod \"collect-profiles-29484315-6vxkt\" (UID: \"fcca12ae-2952-47fb-b97c-6d913948ae44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416960 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjq9f\" (UniqueName: \"kubernetes.io/projected/1956797e-8b6c-41a4-b467-cc8ba5a34466-kube-api-access-cjq9f\") pod \"machine-api-operator-5694c8668f-t5hkb\" (UID: \"1956797e-8b6c-41a4-b467-cc8ba5a34466\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416976 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5jpz\" (UniqueName: \"kubernetes.io/projected/6bcd32d0-1f64-48d5-b9d9-5556573a6927-kube-api-access-m5jpz\") pod \"machine-config-operator-74547568cd-dn9k6\" (UID: \"6bcd32d0-1f64-48d5-b9d9-5556573a6927\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.416991 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c76242d7-8bf3-47f3-9a71-443e13a63e41-cert\") pod \"ingress-canary-2w4zq\" (UID: \"c76242d7-8bf3-47f3-9a71-443e13a63e41\") " pod="openshift-ingress-canary/ingress-canary-2w4zq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417041 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec7a47e8-3b1c-4864-b18e-2884db3ded5b-config-volume\") pod \"dns-default-g4spg\" (UID: \"ec7a47e8-3b1c-4864-b18e-2884db3ded5b\") " pod="openshift-dns/dns-default-g4spg" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417077 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/b03e343f-bcd0-45f8-8ce0-962a6deb71db-available-featuregates\") pod \"openshift-config-operator-7777fb866f-frrqc\" (UID: \"b03e343f-bcd0-45f8-8ce0-962a6deb71db\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417094 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/acbaade5-e87d-4186-932a-9329053b6259-config\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417117 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-registration-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417135 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/29aae90f-3db5-4e31-a13e-35049f8ff2de-trusted-ca\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417150 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fcca12ae-2952-47fb-b97c-6d913948ae44-secret-volume\") pod \"collect-profiles-29484315-6vxkt\" (UID: \"fcca12ae-2952-47fb-b97c-6d913948ae44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417172 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1956797e-8b6c-41a4-b467-cc8ba5a34466-images\") pod \"machine-api-operator-5694c8668f-t5hkb\" (UID: \"1956797e-8b6c-41a4-b467-cc8ba5a34466\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417187 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e0528ce7-4f45-42b2-bd37-defa6ae2b09a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-l28jd\" (UID: \"e0528ce7-4f45-42b2-bd37-defa6ae2b09a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417202 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcmlv\" (UniqueName: \"kubernetes.io/projected/d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a-kube-api-access-vcmlv\") pod \"service-ca-operator-777779d784-sshbr\" (UID: \"d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417240 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/acbaade5-e87d-4186-932a-9329053b6259-serving-cert\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417254 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/2a31ffca-b39b-4c88-af05-b56eb149e248-stats-auth\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417269 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/805bbe6e-8f70-4666-9bb2-10ee278b883e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-rq5tj\" (UID: \"805bbe6e-8f70-4666-9bb2-10ee278b883e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-rq5tj" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417322 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/29aae90f-3db5-4e31-a13e-35049f8ff2de-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 
05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417375 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417392 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/eb067e7f-ca27-4376-b468-7c7735c1336a-signing-key\") pod \"service-ca-9c57cc56f-mc5wq\" (UID: \"eb067e7f-ca27-4376-b468-7c7735c1336a\") " pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417427 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b03e343f-bcd0-45f8-8ce0-962a6deb71db-serving-cert\") pod \"openshift-config-operator-7777fb866f-frrqc\" (UID: \"b03e343f-bcd0-45f8-8ce0-962a6deb71db\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417443 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7c4b5822-ec90-441f-b78f-dbb20d46d483-proxy-tls\") pod \"machine-config-controller-84d6567774-5cczv\" (UID: \"7c4b5822-ec90-441f-b78f-dbb20d46d483\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417461 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bprbq\" (UniqueName: \"kubernetes.io/projected/9431a6ed-b6e1-4d41-bcd1-25a27b822a8c-kube-api-access-bprbq\") pod \"kube-storage-version-migrator-operator-b67b599dd-brx6v\" (UID: \"9431a6ed-b6e1-4d41-bcd1-25a27b822a8c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417514 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqqnb\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-kube-api-access-pqqnb\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417549 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckhql\" (UniqueName: \"kubernetes.io/projected/b03e343f-bcd0-45f8-8ce0-962a6deb71db-kube-api-access-ckhql\") pod \"openshift-config-operator-7777fb866f-frrqc\" (UID: \"b03e343f-bcd0-45f8-8ce0-962a6deb71db\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417570 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5j5vq\" (UniqueName: \"kubernetes.io/projected/e0528ce7-4f45-42b2-bd37-defa6ae2b09a-kube-api-access-5j5vq\") pod \"control-plane-machine-set-operator-78cbb6b69f-l28jd\" (UID: \"e0528ce7-4f45-42b2-bd37-defa6ae2b09a\") " 
pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417589 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d788s\" (UniqueName: \"kubernetes.io/projected/805bbe6e-8f70-4666-9bb2-10ee278b883e-kube-api-access-d788s\") pod \"multus-admission-controller-857f4d67dd-rq5tj\" (UID: \"805bbe6e-8f70-4666-9bb2-10ee278b883e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-rq5tj" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417639 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-csi-data-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417657 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/c7655574-91ab-46f7-8375-fe5a6eb6b1f4-certs\") pod \"machine-config-server-4spp2\" (UID: \"c7655574-91ab-46f7-8375-fe5a6eb6b1f4\") " pod="openshift-machine-config-operator/machine-config-server-4spp2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417688 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7c4b5822-ec90-441f-b78f-dbb20d46d483-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-5cczv\" (UID: \"7c4b5822-ec90-441f-b78f-dbb20d46d483\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417717 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78cnf\" (UniqueName: \"kubernetes.io/projected/a76b9e2d-cd6a-41be-bc29-614b9cf46751-kube-api-access-78cnf\") pod \"catalog-operator-68c6474976-bbjzl\" (UID: \"a76b9e2d-cd6a-41be-bc29-614b9cf46751\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417748 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-plugins-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417793 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9431a6ed-b6e1-4d41-bcd1-25a27b822a8c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-brx6v\" (UID: \"9431a6ed-b6e1-4d41-bcd1-25a27b822a8c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417820 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dec48317-4dfd-40c0-a60a-9ef7fdfaee68-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qkcnm\" (UID: \"dec48317-4dfd-40c0-a60a-9ef7fdfaee68\") " 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417869 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bqvn\" (UniqueName: \"kubernetes.io/projected/c7655574-91ab-46f7-8375-fe5a6eb6b1f4-kube-api-access-8bqvn\") pod \"machine-config-server-4spp2\" (UID: \"c7655574-91ab-46f7-8375-fe5a6eb6b1f4\") " pod="openshift-machine-config-operator/machine-config-server-4spp2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417885 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/2a31ffca-b39b-4c88-af05-b56eb149e248-default-certificate\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417918 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-socket-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417939 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/29aae90f-3db5-4e31-a13e-35049f8ff2de-registry-certificates\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.417970 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/06673954-7d33-4d69-93ea-fd64814b74a1-profile-collector-cert\") pod \"olm-operator-6b444d44fb-db688\" (UID: \"06673954-7d33-4d69-93ea-fd64814b74a1\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418034 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/acbaade5-e87d-4186-932a-9329053b6259-etcd-ca\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418050 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a76b9e2d-cd6a-41be-bc29-614b9cf46751-profile-collector-cert\") pod \"catalog-operator-68c6474976-bbjzl\" (UID: \"a76b9e2d-cd6a-41be-bc29-614b9cf46751\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418095 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9431a6ed-b6e1-4d41-bcd1-25a27b822a8c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-brx6v\" (UID: \"9431a6ed-b6e1-4d41-bcd1-25a27b822a8c\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418121 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a76b9e2d-cd6a-41be-bc29-614b9cf46751-srv-cert\") pod \"catalog-operator-68c6474976-bbjzl\" (UID: \"a76b9e2d-cd6a-41be-bc29-614b9cf46751\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418137 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6bcd32d0-1f64-48d5-b9d9-5556573a6927-auth-proxy-config\") pod \"machine-config-operator-74547568cd-dn9k6\" (UID: \"6bcd32d0-1f64-48d5-b9d9-5556573a6927\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418165 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1956797e-8b6c-41a4-b467-cc8ba5a34466-config\") pod \"machine-api-operator-5694c8668f-t5hkb\" (UID: \"1956797e-8b6c-41a4-b467-cc8ba5a34466\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418208 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckq4g\" (UniqueName: \"kubernetes.io/projected/ec7a47e8-3b1c-4864-b18e-2884db3ded5b-kube-api-access-ckq4g\") pod \"dns-default-g4spg\" (UID: \"ec7a47e8-3b1c-4864-b18e-2884db3ded5b\") " pod="openshift-dns/dns-default-g4spg" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418223 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a-serving-cert\") pod \"service-ca-operator-777779d784-sshbr\" (UID: \"d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418280 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dec48317-4dfd-40c0-a60a-9ef7fdfaee68-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qkcnm\" (UID: \"dec48317-4dfd-40c0-a60a-9ef7fdfaee68\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418295 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/06673954-7d33-4d69-93ea-fd64814b74a1-srv-cert\") pod \"olm-operator-6b444d44fb-db688\" (UID: \"06673954-7d33-4d69-93ea-fd64814b74a1\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418325 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/eb067e7f-ca27-4376-b468-7c7735c1336a-signing-cabundle\") pod \"service-ca-9c57cc56f-mc5wq\" (UID: \"eb067e7f-ca27-4376-b468-7c7735c1336a\") " pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" Jan 22 05:20:53 
crc kubenswrapper[4814]: I0122 05:20:53.418364 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a-config\") pod \"service-ca-operator-777779d784-sshbr\" (UID: \"d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418379 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsjtr\" (UniqueName: \"kubernetes.io/projected/eb067e7f-ca27-4376-b468-7c7735c1336a-kube-api-access-tsjtr\") pod \"service-ca-9c57cc56f-mc5wq\" (UID: \"eb067e7f-ca27-4376-b468-7c7735c1336a\") " pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418407 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flxz5\" (UniqueName: \"kubernetes.io/projected/c76242d7-8bf3-47f3-9a71-443e13a63e41-kube-api-access-flxz5\") pod \"ingress-canary-2w4zq\" (UID: \"c76242d7-8bf3-47f3-9a71-443e13a63e41\") " pod="openshift-ingress-canary/ingress-canary-2w4zq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418423 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhqzc\" (UniqueName: \"kubernetes.io/projected/7c4b5822-ec90-441f-b78f-dbb20d46d483-kube-api-access-zhqzc\") pod \"machine-config-controller-84d6567774-5cczv\" (UID: \"7c4b5822-ec90-441f-b78f-dbb20d46d483\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418462 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6p28\" (UniqueName: \"kubernetes.io/projected/2a31ffca-b39b-4c88-af05-b56eb149e248-kube-api-access-x6p28\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418478 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fcca12ae-2952-47fb-b97c-6d913948ae44-config-volume\") pod \"collect-profiles-29484315-6vxkt\" (UID: \"fcca12ae-2952-47fb-b97c-6d913948ae44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418496 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/acbaade5-e87d-4186-932a-9329053b6259-etcd-service-ca\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418544 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/29aae90f-3db5-4e31-a13e-35049f8ff2de-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418585 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0b9f4ced-dcb7-458a-a111-71d67169f45b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-g9h6j\" (UID: \"0b9f4ced-dcb7-458a-a111-71d67169f45b\") " pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418646 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/1956797e-8b6c-41a4-b467-cc8ba5a34466-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-t5hkb\" (UID: \"1956797e-8b6c-41a4-b467-cc8ba5a34466\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418669 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a31ffca-b39b-4c88-af05-b56eb149e248-service-ca-bundle\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418695 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8ttm\" (UniqueName: \"kubernetes.io/projected/0b9f4ced-dcb7-458a-a111-71d67169f45b-kube-api-access-r8ttm\") pod \"marketplace-operator-79b997595-g9h6j\" (UID: \"0b9f4ced-dcb7-458a-a111-71d67169f45b\") " pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.418723 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6bcd32d0-1f64-48d5-b9d9-5556573a6927-images\") pod \"machine-config-operator-74547568cd-dn9k6\" (UID: \"6bcd32d0-1f64-48d5-b9d9-5556573a6927\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:53 crc kubenswrapper[4814]: E0122 05:20:53.419973 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:53.919963192 +0000 UTC m=+140.003451407 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.420554 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dec48317-4dfd-40c0-a60a-9ef7fdfaee68-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qkcnm\" (UID: \"dec48317-4dfd-40c0-a60a-9ef7fdfaee68\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.420905 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7c4b5822-ec90-441f-b78f-dbb20d46d483-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-5cczv\" (UID: \"7c4b5822-ec90-441f-b78f-dbb20d46d483\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.423171 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/acbaade5-e87d-4186-932a-9329053b6259-etcd-service-ca\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.421704 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1956797e-8b6c-41a4-b467-cc8ba5a34466-config\") pod \"machine-api-operator-5694c8668f-t5hkb\" (UID: \"1956797e-8b6c-41a4-b467-cc8ba5a34466\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.430540 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/29aae90f-3db5-4e31-a13e-35049f8ff2de-registry-certificates\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.436103 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/acbaade5-e87d-4186-932a-9329053b6259-etcd-ca\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.438283 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/b03e343f-bcd0-45f8-8ce0-962a6deb71db-available-featuregates\") pod \"openshift-config-operator-7777fb866f-frrqc\" (UID: \"b03e343f-bcd0-45f8-8ce0-962a6deb71db\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.442818 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/acbaade5-e87d-4186-932a-9329053b6259-config\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.442884 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/29aae90f-3db5-4e31-a13e-35049f8ff2de-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.443081 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/acbaade5-e87d-4186-932a-9329053b6259-etcd-client\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.443148 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7c4b5822-ec90-441f-b78f-dbb20d46d483-proxy-tls\") pod \"machine-config-controller-84d6567774-5cczv\" (UID: \"7c4b5822-ec90-441f-b78f-dbb20d46d483\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.443525 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/acbaade5-e87d-4186-932a-9329053b6259-serving-cert\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.444073 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/1956797e-8b6c-41a4-b467-cc8ba5a34466-images\") pod \"machine-api-operator-5694c8668f-t5hkb\" (UID: \"1956797e-8b6c-41a4-b467-cc8ba5a34466\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.444722 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/29aae90f-3db5-4e31-a13e-35049f8ff2de-trusted-ca\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.444984 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b03e343f-bcd0-45f8-8ce0-962a6deb71db-serving-cert\") pod \"openshift-config-operator-7777fb866f-frrqc\" (UID: \"b03e343f-bcd0-45f8-8ce0-962a6deb71db\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.446377 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-h7dw7"] Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.446734 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-registry-tls\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: 
\"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.449981 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dec48317-4dfd-40c0-a60a-9ef7fdfaee68-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qkcnm\" (UID: \"dec48317-4dfd-40c0-a60a-9ef7fdfaee68\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.455219 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.456281 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/1956797e-8b6c-41a4-b467-cc8ba5a34466-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-t5hkb\" (UID: \"1956797e-8b6c-41a4-b467-cc8ba5a34466\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.460119 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-bound-sa-token\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.466396 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/29aae90f-3db5-4e31-a13e-35049f8ff2de-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.475428 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqqnb\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-kube-api-access-pqqnb\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.496167 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckhql\" (UniqueName: \"kubernetes.io/projected/b03e343f-bcd0-45f8-8ce0-962a6deb71db-kube-api-access-ckhql\") pod \"openshift-config-operator-7777fb866f-frrqc\" (UID: \"b03e343f-bcd0-45f8-8ce0-962a6deb71db\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.508406 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjq9f\" (UniqueName: \"kubernetes.io/projected/1956797e-8b6c-41a4-b467-cc8ba5a34466-kube-api-access-cjq9f\") pod \"machine-api-operator-5694c8668f-t5hkb\" (UID: \"1956797e-8b6c-41a4-b467-cc8ba5a34466\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520544 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520691 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5jpz\" (UniqueName: \"kubernetes.io/projected/6bcd32d0-1f64-48d5-b9d9-5556573a6927-kube-api-access-m5jpz\") pod \"machine-config-operator-74547568cd-dn9k6\" (UID: \"6bcd32d0-1f64-48d5-b9d9-5556573a6927\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520712 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c76242d7-8bf3-47f3-9a71-443e13a63e41-cert\") pod \"ingress-canary-2w4zq\" (UID: \"c76242d7-8bf3-47f3-9a71-443e13a63e41\") " pod="openshift-ingress-canary/ingress-canary-2w4zq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520740 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec7a47e8-3b1c-4864-b18e-2884db3ded5b-config-volume\") pod \"dns-default-g4spg\" (UID: \"ec7a47e8-3b1c-4864-b18e-2884db3ded5b\") " pod="openshift-dns/dns-default-g4spg" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520766 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-registration-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520782 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fcca12ae-2952-47fb-b97c-6d913948ae44-secret-volume\") pod \"collect-profiles-29484315-6vxkt\" (UID: \"fcca12ae-2952-47fb-b97c-6d913948ae44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520799 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e0528ce7-4f45-42b2-bd37-defa6ae2b09a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-l28jd\" (UID: \"e0528ce7-4f45-42b2-bd37-defa6ae2b09a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520815 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcmlv\" (UniqueName: \"kubernetes.io/projected/d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a-kube-api-access-vcmlv\") pod \"service-ca-operator-777779d784-sshbr\" (UID: \"d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520830 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/2a31ffca-b39b-4c88-af05-b56eb149e248-stats-auth\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: 
I0122 05:20:53.520844 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/805bbe6e-8f70-4666-9bb2-10ee278b883e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-rq5tj\" (UID: \"805bbe6e-8f70-4666-9bb2-10ee278b883e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-rq5tj" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520867 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/eb067e7f-ca27-4376-b468-7c7735c1336a-signing-key\") pod \"service-ca-9c57cc56f-mc5wq\" (UID: \"eb067e7f-ca27-4376-b468-7c7735c1336a\") " pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520889 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bprbq\" (UniqueName: \"kubernetes.io/projected/9431a6ed-b6e1-4d41-bcd1-25a27b822a8c-kube-api-access-bprbq\") pod \"kube-storage-version-migrator-operator-b67b599dd-brx6v\" (UID: \"9431a6ed-b6e1-4d41-bcd1-25a27b822a8c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520905 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5j5vq\" (UniqueName: \"kubernetes.io/projected/e0528ce7-4f45-42b2-bd37-defa6ae2b09a-kube-api-access-5j5vq\") pod \"control-plane-machine-set-operator-78cbb6b69f-l28jd\" (UID: \"e0528ce7-4f45-42b2-bd37-defa6ae2b09a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520919 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d788s\" (UniqueName: \"kubernetes.io/projected/805bbe6e-8f70-4666-9bb2-10ee278b883e-kube-api-access-d788s\") pod \"multus-admission-controller-857f4d67dd-rq5tj\" (UID: \"805bbe6e-8f70-4666-9bb2-10ee278b883e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-rq5tj" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520934 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-csi-data-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520948 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/c7655574-91ab-46f7-8375-fe5a6eb6b1f4-certs\") pod \"machine-config-server-4spp2\" (UID: \"c7655574-91ab-46f7-8375-fe5a6eb6b1f4\") " pod="openshift-machine-config-operator/machine-config-server-4spp2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520975 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78cnf\" (UniqueName: \"kubernetes.io/projected/a76b9e2d-cd6a-41be-bc29-614b9cf46751-kube-api-access-78cnf\") pod \"catalog-operator-68c6474976-bbjzl\" (UID: \"a76b9e2d-cd6a-41be-bc29-614b9cf46751\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.520989 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" 
(UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-plugins-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521003 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9431a6ed-b6e1-4d41-bcd1-25a27b822a8c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-brx6v\" (UID: \"9431a6ed-b6e1-4d41-bcd1-25a27b822a8c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521020 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bqvn\" (UniqueName: \"kubernetes.io/projected/c7655574-91ab-46f7-8375-fe5a6eb6b1f4-kube-api-access-8bqvn\") pod \"machine-config-server-4spp2\" (UID: \"c7655574-91ab-46f7-8375-fe5a6eb6b1f4\") " pod="openshift-machine-config-operator/machine-config-server-4spp2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521033 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/2a31ffca-b39b-4c88-af05-b56eb149e248-default-certificate\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521050 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-socket-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521066 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/06673954-7d33-4d69-93ea-fd64814b74a1-profile-collector-cert\") pod \"olm-operator-6b444d44fb-db688\" (UID: \"06673954-7d33-4d69-93ea-fd64814b74a1\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521083 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a76b9e2d-cd6a-41be-bc29-614b9cf46751-profile-collector-cert\") pod \"catalog-operator-68c6474976-bbjzl\" (UID: \"a76b9e2d-cd6a-41be-bc29-614b9cf46751\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521100 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9431a6ed-b6e1-4d41-bcd1-25a27b822a8c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-brx6v\" (UID: \"9431a6ed-b6e1-4d41-bcd1-25a27b822a8c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521113 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a76b9e2d-cd6a-41be-bc29-614b9cf46751-srv-cert\") pod \"catalog-operator-68c6474976-bbjzl\" (UID: 
\"a76b9e2d-cd6a-41be-bc29-614b9cf46751\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521127 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6bcd32d0-1f64-48d5-b9d9-5556573a6927-auth-proxy-config\") pod \"machine-config-operator-74547568cd-dn9k6\" (UID: \"6bcd32d0-1f64-48d5-b9d9-5556573a6927\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521143 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckq4g\" (UniqueName: \"kubernetes.io/projected/ec7a47e8-3b1c-4864-b18e-2884db3ded5b-kube-api-access-ckq4g\") pod \"dns-default-g4spg\" (UID: \"ec7a47e8-3b1c-4864-b18e-2884db3ded5b\") " pod="openshift-dns/dns-default-g4spg" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521168 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a-serving-cert\") pod \"service-ca-operator-777779d784-sshbr\" (UID: \"d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521184 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/06673954-7d33-4d69-93ea-fd64814b74a1-srv-cert\") pod \"olm-operator-6b444d44fb-db688\" (UID: \"06673954-7d33-4d69-93ea-fd64814b74a1\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521199 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/eb067e7f-ca27-4376-b468-7c7735c1336a-signing-cabundle\") pod \"service-ca-9c57cc56f-mc5wq\" (UID: \"eb067e7f-ca27-4376-b468-7c7735c1336a\") " pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521225 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a-config\") pod \"service-ca-operator-777779d784-sshbr\" (UID: \"d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521243 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsjtr\" (UniqueName: \"kubernetes.io/projected/eb067e7f-ca27-4376-b468-7c7735c1336a-kube-api-access-tsjtr\") pod \"service-ca-9c57cc56f-mc5wq\" (UID: \"eb067e7f-ca27-4376-b468-7c7735c1336a\") " pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521261 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flxz5\" (UniqueName: \"kubernetes.io/projected/c76242d7-8bf3-47f3-9a71-443e13a63e41-kube-api-access-flxz5\") pod \"ingress-canary-2w4zq\" (UID: \"c76242d7-8bf3-47f3-9a71-443e13a63e41\") " pod="openshift-ingress-canary/ingress-canary-2w4zq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521283 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-x6p28\" (UniqueName: \"kubernetes.io/projected/2a31ffca-b39b-4c88-af05-b56eb149e248-kube-api-access-x6p28\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521297 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fcca12ae-2952-47fb-b97c-6d913948ae44-config-volume\") pod \"collect-profiles-29484315-6vxkt\" (UID: \"fcca12ae-2952-47fb-b97c-6d913948ae44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521314 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0b9f4ced-dcb7-458a-a111-71d67169f45b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-g9h6j\" (UID: \"0b9f4ced-dcb7-458a-a111-71d67169f45b\") " pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521329 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a31ffca-b39b-4c88-af05-b56eb149e248-service-ca-bundle\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521344 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8ttm\" (UniqueName: \"kubernetes.io/projected/0b9f4ced-dcb7-458a-a111-71d67169f45b-kube-api-access-r8ttm\") pod \"marketplace-operator-79b997595-g9h6j\" (UID: \"0b9f4ced-dcb7-458a-a111-71d67169f45b\") " pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521359 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6bcd32d0-1f64-48d5-b9d9-5556573a6927-images\") pod \"machine-config-operator-74547568cd-dn9k6\" (UID: \"6bcd32d0-1f64-48d5-b9d9-5556573a6927\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521374 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ec7a47e8-3b1c-4864-b18e-2884db3ded5b-metrics-tls\") pod \"dns-default-g4spg\" (UID: \"ec7a47e8-3b1c-4864-b18e-2884db3ded5b\") " pod="openshift-dns/dns-default-g4spg" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521399 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0b9f4ced-dcb7-458a-a111-71d67169f45b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-g9h6j\" (UID: \"0b9f4ced-dcb7-458a-a111-71d67169f45b\") " pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521414 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpnbg\" (UniqueName: \"kubernetes.io/projected/06673954-7d33-4d69-93ea-fd64814b74a1-kube-api-access-zpnbg\") pod \"olm-operator-6b444d44fb-db688\" (UID: \"06673954-7d33-4d69-93ea-fd64814b74a1\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521429 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6bcd32d0-1f64-48d5-b9d9-5556573a6927-proxy-tls\") pod \"machine-config-operator-74547568cd-dn9k6\" (UID: \"6bcd32d0-1f64-48d5-b9d9-5556573a6927\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521450 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-mountpoint-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521465 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqzxf\" (UniqueName: \"kubernetes.io/projected/b6998da9-cc02-4da8-b3d4-c02f32318b6f-kube-api-access-tqzxf\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521479 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/c7655574-91ab-46f7-8375-fe5a6eb6b1f4-node-bootstrap-token\") pod \"machine-config-server-4spp2\" (UID: \"c7655574-91ab-46f7-8375-fe5a6eb6b1f4\") " pod="openshift-machine-config-operator/machine-config-server-4spp2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521493 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a31ffca-b39b-4c88-af05-b56eb149e248-metrics-certs\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.521508 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqz9q\" (UniqueName: \"kubernetes.io/projected/fcca12ae-2952-47fb-b97c-6d913948ae44-kube-api-access-sqz9q\") pod \"collect-profiles-29484315-6vxkt\" (UID: \"fcca12ae-2952-47fb-b97c-6d913948ae44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" Jan 22 05:20:53 crc kubenswrapper[4814]: E0122 05:20:53.521720 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:54.021706745 +0000 UTC m=+140.105194960 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:53 crc kubenswrapper[4814]: W0122 05:20:53.522501 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf6117233_bf75_4d0a_b930_099b1021a9ac.slice/crio-72931d3657df792ff2542d74a736da69735b4866ec0497c661dd4180a2be66b8 WatchSource:0}: Error finding container 72931d3657df792ff2542d74a736da69735b4866ec0497c661dd4180a2be66b8: Status 404 returned error can't find the container with id 72931d3657df792ff2542d74a736da69735b4866ec0497c661dd4180a2be66b8 Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.523799 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a31ffca-b39b-4c88-af05-b56eb149e248-service-ca-bundle\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.524294 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6bcd32d0-1f64-48d5-b9d9-5556573a6927-images\") pod \"machine-config-operator-74547568cd-dn9k6\" (UID: \"6bcd32d0-1f64-48d5-b9d9-5556573a6927\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.526339 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c76242d7-8bf3-47f3-9a71-443e13a63e41-cert\") pod \"ingress-canary-2w4zq\" (UID: \"c76242d7-8bf3-47f3-9a71-443e13a63e41\") " pod="openshift-ingress-canary/ingress-canary-2w4zq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.527190 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-registration-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.529075 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec7a47e8-3b1c-4864-b18e-2884db3ded5b-config-volume\") pod \"dns-default-g4spg\" (UID: \"ec7a47e8-3b1c-4864-b18e-2884db3ded5b\") " pod="openshift-dns/dns-default-g4spg" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.534842 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-mountpoint-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.535701 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a76b9e2d-cd6a-41be-bc29-614b9cf46751-srv-cert\") pod 
\"catalog-operator-68c6474976-bbjzl\" (UID: \"a76b9e2d-cd6a-41be-bc29-614b9cf46751\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.538222 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6bcd32d0-1f64-48d5-b9d9-5556573a6927-auth-proxy-config\") pod \"machine-config-operator-74547568cd-dn9k6\" (UID: \"6bcd32d0-1f64-48d5-b9d9-5556573a6927\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.540059 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/c7655574-91ab-46f7-8375-fe5a6eb6b1f4-certs\") pod \"machine-config-server-4spp2\" (UID: \"c7655574-91ab-46f7-8375-fe5a6eb6b1f4\") " pod="openshift-machine-config-operator/machine-config-server-4spp2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.541257 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0b9f4ced-dcb7-458a-a111-71d67169f45b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-g9h6j\" (UID: \"0b9f4ced-dcb7-458a-a111-71d67169f45b\") " pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.541424 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-csi-data-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.541475 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-plugins-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.542440 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/eb067e7f-ca27-4376-b468-7c7735c1336a-signing-cabundle\") pod \"service-ca-9c57cc56f-mc5wq\" (UID: \"eb067e7f-ca27-4376-b468-7c7735c1336a\") " pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.545086 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9431a6ed-b6e1-4d41-bcd1-25a27b822a8c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-brx6v\" (UID: \"9431a6ed-b6e1-4d41-bcd1-25a27b822a8c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.545148 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fcca12ae-2952-47fb-b97c-6d913948ae44-config-volume\") pod \"collect-profiles-29484315-6vxkt\" (UID: \"fcca12ae-2952-47fb-b97c-6d913948ae44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.545581 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a-config\") pod \"service-ca-operator-777779d784-sshbr\" (UID: \"d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.545712 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/b6998da9-cc02-4da8-b3d4-c02f32318b6f-socket-dir\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.547617 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fcca12ae-2952-47fb-b97c-6d913948ae44-secret-volume\") pod \"collect-profiles-29484315-6vxkt\" (UID: \"fcca12ae-2952-47fb-b97c-6d913948ae44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.551588 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/06673954-7d33-4d69-93ea-fd64814b74a1-profile-collector-cert\") pod \"olm-operator-6b444d44fb-db688\" (UID: \"06673954-7d33-4d69-93ea-fd64814b74a1\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.552466 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2a31ffca-b39b-4c88-af05-b56eb149e248-metrics-certs\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.558177 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a76b9e2d-cd6a-41be-bc29-614b9cf46751-profile-collector-cert\") pod \"catalog-operator-68c6474976-bbjzl\" (UID: \"a76b9e2d-cd6a-41be-bc29-614b9cf46751\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.558968 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/c7655574-91ab-46f7-8375-fe5a6eb6b1f4-node-bootstrap-token\") pod \"machine-config-server-4spp2\" (UID: \"c7655574-91ab-46f7-8375-fe5a6eb6b1f4\") " pod="openshift-machine-config-operator/machine-config-server-4spp2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.571124 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/805bbe6e-8f70-4666-9bb2-10ee278b883e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-rq5tj\" (UID: \"805bbe6e-8f70-4666-9bb2-10ee278b883e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-rq5tj" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.574260 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9431a6ed-b6e1-4d41-bcd1-25a27b822a8c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-brx6v\" (UID: \"9431a6ed-b6e1-4d41-bcd1-25a27b822a8c\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.576562 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6bcd32d0-1f64-48d5-b9d9-5556573a6927-proxy-tls\") pod \"machine-config-operator-74547568cd-dn9k6\" (UID: \"6bcd32d0-1f64-48d5-b9d9-5556573a6927\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.577372 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/e0528ce7-4f45-42b2-bd37-defa6ae2b09a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-l28jd\" (UID: \"e0528ce7-4f45-42b2-bd37-defa6ae2b09a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.584858 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smtmn\" (UniqueName: \"kubernetes.io/projected/acbaade5-e87d-4186-932a-9329053b6259-kube-api-access-smtmn\") pod \"etcd-operator-b45778765-bmlds\" (UID: \"acbaade5-e87d-4186-932a-9329053b6259\") " pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.585050 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0b9f4ced-dcb7-458a-a111-71d67169f45b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-g9h6j\" (UID: \"0b9f4ced-dcb7-458a-a111-71d67169f45b\") " pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.585157 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a-serving-cert\") pod \"service-ca-operator-777779d784-sshbr\" (UID: \"d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.585357 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ec7a47e8-3b1c-4864-b18e-2884db3ded5b-metrics-tls\") pod \"dns-default-g4spg\" (UID: \"ec7a47e8-3b1c-4864-b18e-2884db3ded5b\") " pod="openshift-dns/dns-default-g4spg" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.585531 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/2a31ffca-b39b-4c88-af05-b56eb149e248-stats-auth\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.585914 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/2a31ffca-b39b-4c88-af05-b56eb149e248-default-certificate\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.586425 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"signing-key\" (UniqueName: \"kubernetes.io/secret/eb067e7f-ca27-4376-b468-7c7735c1336a-signing-key\") pod \"service-ca-9c57cc56f-mc5wq\" (UID: \"eb067e7f-ca27-4376-b468-7c7735c1336a\") " pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.594207 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/06673954-7d33-4d69-93ea-fd64814b74a1-srv-cert\") pod \"olm-operator-6b444d44fb-db688\" (UID: \"06673954-7d33-4d69-93ea-fd64814b74a1\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.627686 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: E0122 05:20:53.628208 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:54.128195307 +0000 UTC m=+140.211683512 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.634261 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhqzc\" (UniqueName: \"kubernetes.io/projected/7c4b5822-ec90-441f-b78f-dbb20d46d483-kube-api-access-zhqzc\") pod \"machine-config-controller-84d6567774-5cczv\" (UID: \"7c4b5822-ec90-441f-b78f-dbb20d46d483\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.634655 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.654373 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqz9q\" (UniqueName: \"kubernetes.io/projected/fcca12ae-2952-47fb-b97c-6d913948ae44-kube-api-access-sqz9q\") pod \"collect-profiles-29484315-6vxkt\" (UID: \"fcca12ae-2952-47fb-b97c-6d913948ae44\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.667031 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5jpz\" (UniqueName: \"kubernetes.io/projected/6bcd32d0-1f64-48d5-b9d9-5556573a6927-kube-api-access-m5jpz\") pod \"machine-config-operator-74547568cd-dn9k6\" (UID: \"6bcd32d0-1f64-48d5-b9d9-5556573a6927\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.671122 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dec48317-4dfd-40c0-a60a-9ef7fdfaee68-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-qkcnm\" (UID: \"dec48317-4dfd-40c0-a60a-9ef7fdfaee68\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.714358 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpnbg\" (UniqueName: \"kubernetes.io/projected/06673954-7d33-4d69-93ea-fd64814b74a1-kube-api-access-zpnbg\") pod \"olm-operator-6b444d44fb-db688\" (UID: \"06673954-7d33-4d69-93ea-fd64814b74a1\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.721092 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.742898 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.743996 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8ttm\" (UniqueName: \"kubernetes.io/projected/0b9f4ced-dcb7-458a-a111-71d67169f45b-kube-api-access-r8ttm\") pod \"marketplace-operator-79b997595-g9h6j\" (UID: \"0b9f4ced-dcb7-458a-a111-71d67169f45b\") " pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.744148 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:53 crc kubenswrapper[4814]: E0122 05:20:53.744502 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:54.24448668 +0000 UTC m=+140.327974895 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.747977 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcmlv\" (UniqueName: \"kubernetes.io/projected/d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a-kube-api-access-vcmlv\") pod \"service-ca-operator-777779d784-sshbr\" (UID: \"d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.751862 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqzxf\" (UniqueName: \"kubernetes.io/projected/b6998da9-cc02-4da8-b3d4-c02f32318b6f-kube-api-access-tqzxf\") pod \"csi-hostpathplugin-5xkqn\" (UID: \"b6998da9-cc02-4da8-b3d4-c02f32318b6f\") " pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.770825 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt"] Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.772781 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.775574 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckq4g\" (UniqueName: \"kubernetes.io/projected/ec7a47e8-3b1c-4864-b18e-2884db3ded5b-kube-api-access-ckq4g\") pod \"dns-default-g4spg\" (UID: \"ec7a47e8-3b1c-4864-b18e-2884db3ded5b\") " pod="openshift-dns/dns-default-g4spg" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.811446 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5j5vq\" (UniqueName: \"kubernetes.io/projected/e0528ce7-4f45-42b2-bd37-defa6ae2b09a-kube-api-access-5j5vq\") pod \"control-plane-machine-set-operator-78cbb6b69f-l28jd\" (UID: \"e0528ce7-4f45-42b2-bd37-defa6ae2b09a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.814332 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-jnnrg"] Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.815179 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp"] Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.828647 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78cnf\" (UniqueName: \"kubernetes.io/projected/a76b9e2d-cd6a-41be-bc29-614b9cf46751-kube-api-access-78cnf\") pod \"catalog-operator-68c6474976-bbjzl\" (UID: \"a76b9e2d-cd6a-41be-bc29-614b9cf46751\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.828855 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.838329 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d788s\" (UniqueName: \"kubernetes.io/projected/805bbe6e-8f70-4666-9bb2-10ee278b883e-kube-api-access-d788s\") pod \"multus-admission-controller-857f4d67dd-rq5tj\" (UID: \"805bbe6e-8f70-4666-9bb2-10ee278b883e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-rq5tj" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.847679 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:53 crc kubenswrapper[4814]: E0122 05:20:53.848021 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:54.348010222 +0000 UTC m=+140.431498437 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.862552 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.867502 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bprbq\" (UniqueName: \"kubernetes.io/projected/9431a6ed-b6e1-4d41-bcd1-25a27b822a8c-kube-api-access-bprbq\") pod \"kube-storage-version-migrator-operator-b67b599dd-brx6v\" (UID: \"9431a6ed-b6e1-4d41-bcd1-25a27b822a8c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.872255 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bqvn\" (UniqueName: \"kubernetes.io/projected/c7655574-91ab-46f7-8375-fe5a6eb6b1f4-kube-api-access-8bqvn\") pod \"machine-config-server-4spp2\" (UID: \"c7655574-91ab-46f7-8375-fe5a6eb6b1f4\") " pod="openshift-machine-config-operator/machine-config-server-4spp2" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.872467 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.881801 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.890273 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-kst8c"] Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.901921 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.903380 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsjtr\" (UniqueName: \"kubernetes.io/projected/eb067e7f-ca27-4376-b468-7c7735c1336a-kube-api-access-tsjtr\") pod \"service-ca-9c57cc56f-mc5wq\" (UID: \"eb067e7f-ca27-4376-b468-7c7735c1336a\") " pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.909471 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.909609 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6p28\" (UniqueName: \"kubernetes.io/projected/2a31ffca-b39b-4c88-af05-b56eb149e248-kube-api-access-x6p28\") pod \"router-default-5444994796-xd5fb\" (UID: \"2a31ffca-b39b-4c88-af05-b56eb149e248\") " pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.912264 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25"] Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.917487 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.936100 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.936647 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.941908 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-rq5tj" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.954397 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flxz5\" (UniqueName: \"kubernetes.io/projected/c76242d7-8bf3-47f3-9a71-443e13a63e41-kube-api-access-flxz5\") pod \"ingress-canary-2w4zq\" (UID: \"c76242d7-8bf3-47f3-9a71-443e13a63e41\") " pod="openshift-ingress-canary/ingress-canary-2w4zq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.954817 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.955697 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:53 crc kubenswrapper[4814]: E0122 05:20:53.956080 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:54.456067698 +0000 UTC m=+140.539555923 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.956159 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-g4spg" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.970570 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-2w4zq" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.978254 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" Jan 22 05:20:53 crc kubenswrapper[4814]: I0122 05:20:53.983668 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-4spp2" Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.061461 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:54 crc kubenswrapper[4814]: E0122 05:20:54.061756 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:54.561743313 +0000 UTC m=+140.645231528 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.102042 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c"] Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.121307 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8"] Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.151791 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.163102 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:54 crc kubenswrapper[4814]: E0122 05:20:54.163393 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:54.663378241 +0000 UTC m=+140.746866456 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.210987 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jnnrg" event={"ID":"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8","Type":"ContainerStarted","Data":"0fed9d6afb2138ba7d69e904890bd125fab460d26e8ebcc580e0cf23934bea50"} Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.224658 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" event={"ID":"f6117233-bf75-4d0a-b930-099b1021a9ac","Type":"ContainerStarted","Data":"72931d3657df792ff2542d74a736da69735b4866ec0497c661dd4180a2be66b8"} Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.228168 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" event={"ID":"1980267a-5bfc-40b9-abe6-ae0e9774910c","Type":"ContainerStarted","Data":"6fa0ae74c5ee923aba0353c2341cc2c0eb4a822f96050e41ab32915f9e09055b"} Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.268122 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp" event={"ID":"dda1c739-e778-4794-b1a3-cf1db49fd7df","Type":"ContainerStarted","Data":"689a6f009d76eefdf0650e494d8cdc8cd7fb96a903500fe690fd8a14e3f82096"} Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.274032 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:54 crc kubenswrapper[4814]: E0122 05:20:54.274344 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:54.774330904 +0000 UTC m=+140.857819119 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.317339 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25" event={"ID":"b75d4696-814c-420a-8283-df0ce39bdca7","Type":"ContainerStarted","Data":"6760942eb98154ca673cda015b384a95690b48e0417914c9cbc42ce261bde6c1"} Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.327002 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-kst8c" event={"ID":"698622fa-b0b2-4099-995f-9c723376c176","Type":"ContainerStarted","Data":"9b41fda181bc3a4f7cb0c167918f4bd7e9fe2f40c680f443af95000bdd037385"} Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.376479 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:54 crc kubenswrapper[4814]: E0122 05:20:54.377063 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:54.877050189 +0000 UTC m=+140.960538404 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.402160 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-tmm6k"] Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.481486 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:54 crc kubenswrapper[4814]: E0122 05:20:54.483323 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:54.983310995 +0000 UTC m=+141.066799210 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.540339 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt"] Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.551419 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb"] Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.586311 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:54 crc kubenswrapper[4814]: E0122 05:20:54.586615 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:55.086600639 +0000 UTC m=+141.170088854 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.633289 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" podStartSLOduration=120.633271703 podStartE2EDuration="2m0.633271703s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:54.632872029 +0000 UTC m=+140.716360244" watchObservedRunningTime="2026-01-22 05:20:54.633271703 +0000 UTC m=+140.716759918" Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.687381 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:54 crc kubenswrapper[4814]: E0122 05:20:54.687689 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:55.187677759 +0000 UTC m=+141.271165974 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.707220 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-b86l9"] Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.791008 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:54 crc kubenswrapper[4814]: E0122 05:20:54.791304 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:55.291282323 +0000 UTC m=+141.374770538 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.791444 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:54 crc kubenswrapper[4814]: E0122 05:20:54.791835 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:55.291826351 +0000 UTC m=+141.375314566 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:54 crc kubenswrapper[4814]: I0122 05:20:54.900249 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:54 crc kubenswrapper[4814]: E0122 05:20:54.900638 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:55.400608141 +0000 UTC m=+141.484096356 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.001731 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:55 crc kubenswrapper[4814]: E0122 05:20:55.002222 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:55.502210839 +0000 UTC m=+141.585699054 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.104534 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:55 crc kubenswrapper[4814]: E0122 05:20:55.104819 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:55.60480517 +0000 UTC m=+141.688293385 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.207410 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:55 crc kubenswrapper[4814]: E0122 05:20:55.210404 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:55.710389952 +0000 UTC m=+141.793878167 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.286886 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-mhktc"] Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.313620 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:55 crc kubenswrapper[4814]: E0122 05:20:55.314130 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:55.814113071 +0000 UTC m=+141.897601286 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.383278 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628"] Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.385471 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt" event={"ID":"7b8abd82-5187-4029-82aa-f0d5495ce298","Type":"ContainerStarted","Data":"3bd8e920939dacf8fe365f96d7bf495048ed57a4f3e6b32ce7e39191b765214f"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.416172 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:55 crc kubenswrapper[4814]: E0122 05:20:55.416458 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:55.916446693 +0000 UTC m=+141.999934898 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.436922 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" podStartSLOduration=121.436899933 podStartE2EDuration="2m1.436899933s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:55.38583503 +0000 UTC m=+141.469323245" watchObservedRunningTime="2026-01-22 05:20:55.436899933 +0000 UTC m=+141.520388148" Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.440428 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hvr97" event={"ID":"70a9137f-7cb9-429a-b0fb-76f7184e7936","Type":"ContainerStarted","Data":"1debd293563d4bf5e767e4602465be301c540cf2a1c81f9c97e32c1266f9128b"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.467211 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd"] Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.504083 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" event={"ID":"f6117233-bf75-4d0a-b930-099b1021a9ac","Type":"ContainerStarted","Data":"3b56a508d0e1c3871c31a9d116e7b1bdbd7e84eb19d135fcdfae4af6c577760d"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.518676 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:55 crc kubenswrapper[4814]: E0122 05:20:55.524960 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:56.024926522 +0000 UTC m=+142.108414737 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.525604 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:55 crc kubenswrapper[4814]: E0122 05:20:55.526034 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:56.026021479 +0000 UTC m=+142.109509684 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.564900 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb"] Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.577527 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" event={"ID":"1980267a-5bfc-40b9-abe6-ae0e9774910c","Type":"ContainerStarted","Data":"705560ab7e497e381691e75323e353225f407ed694a26ca1e5e0b688fdeaaf85"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.629394 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:55 crc kubenswrapper[4814]: E0122 05:20:55.629802 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:56.12978518 +0000 UTC m=+142.213273385 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.649604 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-xd5fb" event={"ID":"2a31ffca-b39b-4c88-af05-b56eb149e248","Type":"ContainerStarted","Data":"1ea9fde1b0ce376675eb5bdb35b7162973039a75e86a278b259d5a2f24238cb5"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.653044 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp" event={"ID":"dda1c739-e778-4794-b1a3-cf1db49fd7df","Type":"ContainerStarted","Data":"7ef5877cc4b1fe06d8bb83d6e50d26ecfb3c4b48b9858fe4133642f115857cbf"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.661769 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-g9h6j"] Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.663354 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tmm6k" event={"ID":"21498fe0-31d4-40b6-aa3a-c1cf4047c155","Type":"ContainerStarted","Data":"fad99b2e0b26f38250cfbc87604ef57f596c0298fd5e3d6e057035e94d725bba"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.685960 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" event={"ID":"3e80fbf9-0347-4103-b749-f3a0a9f5e485","Type":"ContainerStarted","Data":"34fedcd7563a113db08abf9cdf7ad01dfef378445872986c6acf0713f853ddaf"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.699557 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln"] Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.715957 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jnnrg" event={"ID":"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8","Type":"ContainerStarted","Data":"5627d88f02d4e512e41c42f76c73ed608548939439663c7a662347e4fed6cd76"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.717929 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-fsdht"] Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.726968 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb" event={"ID":"b6e1cb8a-27b0-4267-a54c-6858de4a1e1b","Type":"ContainerStarted","Data":"af266a6cae4a48e062dc5453126a423d88f16ee35b7fe63ba4f1dd33ce5d2bc6"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.731082 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:55 crc kubenswrapper[4814]: E0122 05:20:55.732421 
4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:56.232410651 +0000 UTC m=+142.315898866 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.746180 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-4spp2" event={"ID":"c7655574-91ab-46f7-8375-fe5a6eb6b1f4","Type":"ContainerStarted","Data":"8be23256bac16ab0fc4796445d996549abdac23c70bb5fc7d838b60a7e853516"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.760828 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8" event={"ID":"43690e91-5b82-4acb-972e-24159036039f","Type":"ContainerStarted","Data":"d6c8ffb253e69318a59ec06a1d5a7f051442cfdb7818739a6e6b292efd19671b"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.766931 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" event={"ID":"968a156c-de1c-4d13-bfad-6596916711d5","Type":"ContainerStarted","Data":"82e90596256f6e537e3d4e9f817b9c1980c556feb245fa04bc254c5339a35d6a"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.769591 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-b86l9" event={"ID":"d1fb380d-478d-4925-a8f3-bfe8ac8a40d6","Type":"ContainerStarted","Data":"4b75f51f708e3641a30476a874b543a4303f522f6bb6acbd4c52a075cb56c759"} Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.832374 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:55 crc kubenswrapper[4814]: E0122 05:20:55.833643 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:56.333610206 +0000 UTC m=+142.417098421 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.951773 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:55 crc kubenswrapper[4814]: E0122 05:20:55.957109 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:56.457030839 +0000 UTC m=+142.540519054 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:55 crc kubenswrapper[4814]: I0122 05:20:55.990989 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb" podStartSLOduration=122.990971384 podStartE2EDuration="2m2.990971384s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:55.984524416 +0000 UTC m=+142.068012631" watchObservedRunningTime="2026-01-22 05:20:55.990971384 +0000 UTC m=+142.074459599" Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.054183 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:56 crc kubenswrapper[4814]: E0122 05:20:56.054603 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:56.55458441 +0000 UTC m=+142.638072625 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.132242 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-5mqhp" podStartSLOduration=122.13222487 podStartE2EDuration="2m2.13222487s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:56.100838581 +0000 UTC m=+142.184326796" watchObservedRunningTime="2026-01-22 05:20:56.13222487 +0000 UTC m=+142.215713085" Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.140590 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-bmlds"] Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.157660 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:56 crc kubenswrapper[4814]: E0122 05:20:56.158040 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:56.6580293 +0000 UTC m=+142.741517515 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.158927    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-h7dw7" podStartSLOduration=123.15890787 podStartE2EDuration="2m3.15890787s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:56.144886747 +0000 UTC m=+142.228374972" watchObservedRunningTime="2026-01-22 05:20:56.15890787 +0000 UTC m=+142.242396085"
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.249057    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-4spp2" podStartSLOduration=6.24904185 podStartE2EDuration="6.24904185s" podCreationTimestamp="2026-01-22 05:20:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:56.244665902 +0000 UTC m=+142.328154117" watchObservedRunningTime="2026-01-22 05:20:56.24904185 +0000 UTC m=+142.332530065"
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.258472    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:56 crc kubenswrapper[4814]: E0122 05:20:56.258807    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:56.758791399 +0000 UTC m=+142.842279614 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.312066    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-zwdxr" podStartSLOduration=123.312047475 podStartE2EDuration="2m3.312047475s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:56.311340162 +0000 UTC m=+142.394828377" watchObservedRunningTime="2026-01-22 05:20:56.312047475 +0000 UTC m=+142.395535690"
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.332371    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.335481    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.354819    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.354877    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-5xkqn"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.359560    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:56 crc kubenswrapper[4814]: E0122 05:20:56.361996    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:56.86197946 +0000 UTC m=+142.945467675 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.385395    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-jnnrg" podStartSLOduration=123.385374589 podStartE2EDuration="2m3.385374589s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:56.375572119 +0000 UTC m=+142.459060334" watchObservedRunningTime="2026-01-22 05:20:56.385374589 +0000 UTC m=+142.468862804"
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.413046    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.465313    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:56 crc kubenswrapper[4814]: E0122 05:20:56.465756    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:56.965726099 +0000 UTC m=+143.049214314 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.472020    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:56 crc kubenswrapper[4814]: E0122 05:20:56.472960    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:56.972945064 +0000 UTC m=+143.056433279 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.518982    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-sshbr"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.530673    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-frrqc"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.574106    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:56 crc kubenswrapper[4814]: E0122 05:20:56.574468    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:57.074453618 +0000 UTC m=+143.157941823 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.580941    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-t5hkb"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.586887    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-mc5wq"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.602073    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-g4spg"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.652428    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.675826    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:56 crc kubenswrapper[4814]: E0122 05:20:56.676201    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:57.176173359 +0000 UTC m=+143.259661574 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.703952    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.721930    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.726418    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-2w4zq"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.740547    4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-rq5tj"]
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.785145    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:56 crc kubenswrapper[4814]: E0122 05:20:56.785339    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:57.285318391 +0000 UTC m=+143.368806596 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.785446    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:56 crc kubenswrapper[4814]: E0122 05:20:56.785709    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:57.285699144 +0000 UTC m=+143.369187349 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.826993    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-kst8c" event={"ID":"698622fa-b0b2-4099-995f-9c723376c176","Type":"ContainerStarted","Data":"0770f7b393c8cc2117f79917a2b646ccdc65693ec799e88b0995dbd4a503f2c8"}
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.827758    4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-kst8c"
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.864491    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" event={"ID":"3e80fbf9-0347-4103-b749-f3a0a9f5e485","Type":"ContainerStarted","Data":"cab2273afd020a2438c2e411cc0377cfaa2f86c6c0a69ad71324c17ed2083b4f"}
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.864539    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" event={"ID":"3e80fbf9-0347-4103-b749-f3a0a9f5e485","Type":"ContainerStarted","Data":"86ee50fe5d8f9b527ad8d77fc60a3b097a5448018130f18dd84cf3f2cf421b94"}
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.871221    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-kst8c" podStartSLOduration=123.871202549 podStartE2EDuration="2m3.871202549s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:56.862533406 +0000 UTC m=+142.946021621" watchObservedRunningTime="2026-01-22 05:20:56.871202549 +0000 UTC m=+142.954690764"
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.874703    4814 patch_prober.go:28] interesting pod/console-operator-58897d9998-kst8c container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.19:8443/readyz\": dial tcp 10.217.0.19:8443: connect: connection refused" start-of-body=
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.874775    4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-kst8c" podUID="698622fa-b0b2-4099-995f-9c723376c176" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/readyz\": dial tcp 10.217.0.19:8443: connect: connection refused"
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.886866    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:56 crc kubenswrapper[4814]: E0122 05:20:56.887900    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:57.387883872 +0000 UTC m=+143.471372087 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.896279    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" event={"ID":"acbaade5-e87d-4186-932a-9329053b6259","Type":"ContainerStarted","Data":"6e22d0d5637ea1579ebc87a757d539e031b4b3073d2449eccf62b62fa9a15997"}
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.905775    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-7tzlt" podStartSLOduration=122.905760254 podStartE2EDuration="2m2.905760254s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:56.897403232 +0000 UTC m=+142.980891447" watchObservedRunningTime="2026-01-22 05:20:56.905760254 +0000 UTC m=+142.989248469"
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.925037    4814 csr.go:261] certificate signing request csr-79h2h is approved, waiting to be issued
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.960932    4814 csr.go:257] certificate signing request csr-79h2h is issued
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.986774    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8" event={"ID":"43690e91-5b82-4acb-972e-24159036039f","Type":"ContainerStarted","Data":"613d0102d3611cad1476b06e50d5b699e37701ac9a5339a1ef17cafb065d2768"}
Jan 22 05:20:56 crc kubenswrapper[4814]: I0122 05:20:56.989360    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:56 crc kubenswrapper[4814]: E0122 05:20:56.989741    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:57.489728867 +0000 UTC m=+143.573217082 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.022994    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" event={"ID":"1956797e-8b6c-41a4-b467-cc8ba5a34466","Type":"ContainerStarted","Data":"b41a9a87c40b330ec8af497b16d07db25492890388402802f89ed41dcd605d00"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.036578    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kf8n8" podStartSLOduration=123.036563776 podStartE2EDuration="2m3.036563776s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:57.036298108 +0000 UTC m=+143.119786323" watchObservedRunningTime="2026-01-22 05:20:57.036563776 +0000 UTC m=+143.120051991"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.063410    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-xd5fb" event={"ID":"2a31ffca-b39b-4c88-af05-b56eb149e248","Type":"ContainerStarted","Data":"2acfbe42ef327b2f78ecf8cc072d37b2c8fe5d8a11fedfbc6fd0c9feeb91b04d"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.076124    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-b86l9" event={"ID":"d1fb380d-478d-4925-a8f3-bfe8ac8a40d6","Type":"ContainerStarted","Data":"98ee9ade9453cf88d0e1674d2b7b04c828287fea636ac151b0d6ff82f560012d"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.086926    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-xd5fb" podStartSLOduration=123.086913596 podStartE2EDuration="2m3.086913596s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:57.085477207 +0000 UTC m=+143.168965422" watchObservedRunningTime="2026-01-22 05:20:57.086913596 +0000 UTC m=+143.170401811"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.094152    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:57 crc kubenswrapper[4814]: E0122 05:20:57.094773    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:57.59474677 +0000 UTC m=+143.678234985 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.107855    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-hvr97" event={"ID":"70a9137f-7cb9-429a-b0fb-76f7184e7936","Type":"ContainerStarted","Data":"5de3e5263c592b21e107e9f883df227ae5c40e294593545b994969c413a8c935"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.109383    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" event={"ID":"06673954-7d33-4d69-93ea-fd64814b74a1","Type":"ContainerStarted","Data":"10454ba3fee682ccb21058ade0141d47c48a7dc9f99868e4f5a9d17c9e5e9e5a"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.110609    4814 generic.go:334] "Generic (PLEG): container finished" podID="968a156c-de1c-4d13-bfad-6596916711d5" containerID="b28fb658d105139f187015843a47b136938427be2ac0b26345aed6653c4d9716" exitCode=0
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.110668    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" event={"ID":"968a156c-de1c-4d13-bfad-6596916711d5","Type":"ContainerDied","Data":"b28fb658d105139f187015843a47b136938427be2ac0b26345aed6653c4d9716"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.121518    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628" event={"ID":"44b904d6-2898-43bb-a072-54661fe953cd","Type":"ContainerStarted","Data":"9dd133c50a6b1a4662bfb36a414494eb8de43f977769a4ef18c0a0727c322561"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.121556    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628" event={"ID":"44b904d6-2898-43bb-a072-54661fe953cd","Type":"ContainerStarted","Data":"cdb0dc761454075b5fd53668a026ed885e6786172f0bb719d5bcd54d6e8273e1"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.122089    4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.130780    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-hvr97" podStartSLOduration=124.130770225 podStartE2EDuration="2m4.130770225s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:57.129918066 +0000 UTC m=+143.213406271" watchObservedRunningTime="2026-01-22 05:20:57.130770225 +0000 UTC m=+143.214258440"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.133811    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-c2wdb" event={"ID":"b6e1cb8a-27b0-4267-a54c-6858de4a1e1b","Type":"ContainerStarted","Data":"d6cf9a508b5291db83df6f3e615d364456c926f20d426b1fad82a2f4fd8f6723"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.146669    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25" event={"ID":"b75d4696-814c-420a-8283-df0ce39bdca7","Type":"ContainerStarted","Data":"23200a00b54696bac335389818eaf15e3af5b1fc55687076258f3f19bfc1f5ab"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.153863    4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-xd5fb"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.165836    4814 patch_prober.go:28] interesting pod/router-default-5444994796-xd5fb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 22 05:20:57 crc kubenswrapper[4814]: [-]has-synced failed: reason withheld
Jan 22 05:20:57 crc kubenswrapper[4814]: [+]process-running ok
Jan 22 05:20:57 crc kubenswrapper[4814]: healthz check failed
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.165899    4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xd5fb" podUID="2a31ffca-b39b-4c88-af05-b56eb149e248" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.174969    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-g4spg" event={"ID":"ec7a47e8-3b1c-4864-b18e-2884db3ded5b","Type":"ContainerStarted","Data":"b45d71570437ab4c14e243dcabec903759cbd9e64f97f201b1c15905b36272ae"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.187738    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tmm6k" event={"ID":"21498fe0-31d4-40b6-aa3a-c1cf4047c155","Type":"ContainerStarted","Data":"3f8ff1ec65bef8479dbc5493379031ed7472f00e8fc893eea935485634586d57"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.188301    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628" podStartSLOduration=123.188285195 podStartE2EDuration="2m3.188285195s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:57.161527233 +0000 UTC m=+143.245015448" watchObservedRunningTime="2026-01-22 05:20:57.188285195 +0000 UTC m=+143.271773410"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.198825    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:57 crc kubenswrapper[4814]: E0122 05:20:57.200790    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:57.700776767 +0000 UTC m=+143.784264982 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.216987    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qgs25" podStartSLOduration=123.216973043 podStartE2EDuration="2m3.216973043s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:57.216242328 +0000 UTC m=+143.299730543" watchObservedRunningTime="2026-01-22 05:20:57.216973043 +0000 UTC m=+143.300461258"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.222860    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" event={"ID":"1a79fa9b-7c1f-487c-96f0-bda318f2180d","Type":"ContainerStarted","Data":"f0934b377b2dabe5801bc61851c7e38f7ecd857870d24923552990a7219f2760"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.222913    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" event={"ID":"1a79fa9b-7c1f-487c-96f0-bda318f2180d","Type":"ContainerStarted","Data":"fe6211178f5dc7f71681b522f6c94828d801b152aefda6492ed62e75b1d855c3"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.242975    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" event={"ID":"d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a","Type":"ContainerStarted","Data":"b265793f6217e6583020fd03d6105a5d57b74e1c43291f3332d02884a28c4113"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.244794    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" event={"ID":"7c4b5822-ec90-441f-b78f-dbb20d46d483","Type":"ContainerStarted","Data":"8b5e6513f27cdf6282185d5700c6788efee82033c692d27688d1bbc7fad3d70e"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.272987    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fsdht" event={"ID":"63d01f3f-6487-4147-a862-70739c2c7961","Type":"ContainerStarted","Data":"731cd57442799c4e980917c863c758a97a11f3bd4e273f1ce623c08d1b23f2ff"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.273900    4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-fsdht"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.279951    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tmm6k" podStartSLOduration=123.279937987 podStartE2EDuration="2m3.279937987s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:57.2557156 +0000 UTC m=+143.339203815" watchObservedRunningTime="2026-01-22 05:20:57.279937987 +0000 UTC m=+143.363426192"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.283620    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" event={"ID":"b6998da9-cc02-4da8-b3d4-c02f32318b6f","Type":"ContainerStarted","Data":"af4184106d9c6bfaa1fae03b837afe78f5d5d24c34a6d33a5f1f409ab66b9ceb"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.290195    4814 patch_prober.go:28] interesting pod/downloads-7954f5f757-fsdht container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body=
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.290250    4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fsdht" podUID="63d01f3f-6487-4147-a862-70739c2c7961" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.300055    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:57 crc kubenswrapper[4814]: E0122 05:20:57.301318    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:57.801298348 +0000 UTC m=+143.884786563 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.312325    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-cdhwb" podStartSLOduration=123.312307609 podStartE2EDuration="2m3.312307609s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:57.280594789 +0000 UTC m=+143.364083004" watchObservedRunningTime="2026-01-22 05:20:57.312307609 +0000 UTC m=+143.395795824"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.342344    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" event={"ID":"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87","Type":"ContainerStarted","Data":"5a4bb12a4c537ee7ec6f23e94b659efd01a8cbfd1983c30c8896cb7ebe8180d8"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.342382    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" event={"ID":"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87","Type":"ContainerStarted","Data":"ad1f90a981441c7c55a2b109b00946cd5204f1d5d0ed427365abe869d1e17af7"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.343248    4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.345602    4814 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-mhktc container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.11:6443/healthz\": dial tcp 10.217.0.11:6443: connect: connection refused" start-of-body=
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.345811    4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" podUID="cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.11:6443/healthz\": dial tcp 10.217.0.11:6443: connect: connection refused"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.355471    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-4spp2" event={"ID":"c7655574-91ab-46f7-8375-fe5a6eb6b1f4","Type":"ContainerStarted","Data":"36ed19be4cbbe470ea5423059c4aef92ea308c41f835b8812790eeccbe301868"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.367248    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" podStartSLOduration=124.367229791 podStartE2EDuration="2m4.367229791s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:57.366814888 +0000 UTC m=+143.450303103" watchObservedRunningTime="2026-01-22 05:20:57.367229791 +0000 UTC m=+143.450718006"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.368828    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-fsdht" podStartSLOduration=124.368822986 podStartE2EDuration="2m4.368822986s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:57.312909039 +0000 UTC m=+143.396397254" watchObservedRunningTime="2026-01-22 05:20:57.368822986 +0000 UTC m=+143.452311191"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.387093    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" event={"ID":"f5552558-72a7-40d0-a265-450ce55c22ad","Type":"ContainerStarted","Data":"7fe4cc3ccccb28bfce1e774417eedb4b9270f4e39ec5e0c1a8491615fb64e33d"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.387674    4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.392242    4814 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-n6gln container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:5443/healthz\": dial tcp 10.217.0.18:5443: connect: connection refused" start-of-body=
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.392285    4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" podUID="f5552558-72a7-40d0-a265-450ce55c22ad" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.18:5443/healthz\": dial tcp 10.217.0.18:5443: connect: connection refused"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.403296    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:57 crc kubenswrapper[4814]: E0122 05:20:57.403668    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:57.90365354 +0000 UTC m=+143.987141745 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.407257    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" event={"ID":"b03e343f-bcd0-45f8-8ce0-962a6deb71db","Type":"ContainerStarted","Data":"fa3ffc773be33f92a614c0a90a61ad79c41bed24bde34ecf7640128a4ab345f2"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.432515    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" podStartSLOduration=123.432499983 podStartE2EDuration="2m3.432499983s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:57.431610083 +0000 UTC m=+143.515098298" watchObservedRunningTime="2026-01-22 05:20:57.432499983 +0000 UTC m=+143.515988198"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.440193    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" event={"ID":"0b9f4ced-dcb7-458a-a111-71d67169f45b","Type":"ContainerStarted","Data":"1fc5735fed31aeb09fee25efe65b2893ce9de65288c8092f259b7dc07c0b6211"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.441058    4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.452072    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" event={"ID":"6bcd32d0-1f64-48d5-b9d9-5556573a6927","Type":"ContainerStarted","Data":"b2abfbc087da46cb0a5e56e2fd54e72fdf9d83c542ca7643b63e8b0b14cd6473"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.458190    4814 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-g9h6j container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body=
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.458254    4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" podUID="0b9f4ced-dcb7-458a-a111-71d67169f45b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.478821    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" podStartSLOduration=123.478798416 podStartE2EDuration="2m3.478798416s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:57.471418376 +0000 UTC m=+143.554906591" watchObservedRunningTime="2026-01-22 05:20:57.478798416 +0000 UTC m=+143.562286651"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.494425    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" event={"ID":"eb067e7f-ca27-4376-b468-7c7735c1336a","Type":"ContainerStarted","Data":"edbca1d1225aada6e89dbe47e1aca295612b769bd0fd35bd8b609353e3eb8dfd"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.504885    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:57 crc kubenswrapper[4814]: E0122 05:20:57.506543    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:58.006509151 +0000 UTC m=+144.089997366 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.527720    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd" event={"ID":"e0528ce7-4f45-42b2-bd37-defa6ae2b09a","Type":"ContainerStarted","Data":"d79080fac6287b4e30d5c142d34fb8c77504a8a7f59b002698512888b2a5d414"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.527762    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd" event={"ID":"e0528ce7-4f45-42b2-bd37-defa6ae2b09a","Type":"ContainerStarted","Data":"7ed03b56439da9f853565e721d8a32847bbbe4fd27120c65bb66ff263e38f9ff"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.545089    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm" event={"ID":"dec48317-4dfd-40c0-a60a-9ef7fdfaee68","Type":"ContainerStarted","Data":"05b05e2848b12c1f62be5f690b0fa5849bd24df529f228fe9c1f25aae9a61cfb"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.555788    4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-l28jd" podStartSLOduration=123.555774752 podStartE2EDuration="2m3.555774752s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:57.552667417 +0000 UTC m=+143.636155632" watchObservedRunningTime="2026-01-22 05:20:57.555774752 +0000 UTC m=+143.639262967"
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.580252    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt" event={"ID":"7b8abd82-5187-4029-82aa-f0d5495ce298","Type":"ContainerStarted","Data":"e2539e0016b5eac8365b1c22272184ce1cfcc3d095b89a99b2834b162cbbf1f4"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.580286    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt" event={"ID":"7b8abd82-5187-4029-82aa-f0d5495ce298","Type":"ContainerStarted","Data":"ee700a5bd108396b5ee37a9501556d1122f00a5f21a1fb66ae01090c01cd1375"}
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.607423    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:57 crc kubenswrapper[4814]: E0122 05:20:57.626103    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:58.126083164 +0000 UTC m=+144.209571379 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.728603    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:57 crc kubenswrapper[4814]: E0122 05:20:57.729083    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:58.229064158 +0000 UTC m=+144.312552373 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.729385    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:57 crc kubenswrapper[4814]: E0122 05:20:57.729760    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:58.229751631 +0000 UTC m=+144.313239846 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.829956    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:57 crc kubenswrapper[4814]: E0122 05:20:57.830456    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:58.330444118 +0000 UTC m=+144.413932333 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.932619    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:57 crc kubenswrapper[4814]: E0122 05:20:57.933201    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:58.433189764 +0000 UTC m=+144.516677979 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.961768    4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-22 05:15:56 +0000 UTC, rotation deadline is 2026-11-08 13:35:41.162731628 +0000 UTC
Jan 22 05:20:57 crc kubenswrapper[4814]: I0122 05:20:57.961796    4814 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6968h14m43.200938079s for next certificate rotation
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.033641    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:58 crc kubenswrapper[4814]: E0122 05:20:58.033976    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:58.533941673 +0000 UTC m=+144.617429888 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.034587    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:58 crc kubenswrapper[4814]: E0122 05:20:58.034936    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:58.534923926 +0000 UTC m=+144.618412141 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.135341    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:58 crc kubenswrapper[4814]: E0122 05:20:58.135953    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:58.635936804 +0000 UTC m=+144.719425019 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.164794    4814 patch_prober.go:28] interesting pod/router-default-5444994796-xd5fb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 22 05:20:58 crc kubenswrapper[4814]: [-]has-synced failed: reason withheld
Jan 22 05:20:58 crc kubenswrapper[4814]: [+]process-running ok
Jan 22 05:20:58 crc kubenswrapper[4814]: healthz check failed
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.165128    4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xd5fb" podUID="2a31ffca-b39b-4c88-af05-b56eb149e248" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.236817    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:58 crc kubenswrapper[4814]: E0122 05:20:58.237125    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:58.737110837 +0000 UTC m=+144.820599052 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.337763    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:58 crc kubenswrapper[4814]: E0122 05:20:58.337994    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:58.83797216 +0000 UTC m=+144.921460375 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.439683    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:58 crc kubenswrapper[4814]: E0122 05:20:58.439988    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:58.93997691 +0000 UTC m=+145.023465125 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.541173    4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:20:58 crc kubenswrapper[4814]: E0122 05:20:58.541362    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:59.04134698 +0000 UTC m=+145.124835195 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.541512    4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:58 crc kubenswrapper[4814]: E0122 05:20:58.541843    4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:59.041832367 +0000 UTC m=+145.125320582 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.594038    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-rq5tj" event={"ID":"805bbe6e-8f70-4666-9bb2-10ee278b883e","Type":"ContainerStarted","Data":"9cf3aafaefcaa1371ac4a842c834fbb4561dba063d13f7f7ac6bcce760026a6f"}
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.594092    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-rq5tj" event={"ID":"805bbe6e-8f70-4666-9bb2-10ee278b883e","Type":"ContainerStarted","Data":"32f8b9bdb0a76fd722969e15de9788e0eb5eeffc9c34fd85952973a8671acea2"}
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.596516    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628" event={"ID":"44b904d6-2898-43bb-a072-54661fe953cd","Type":"ContainerStarted","Data":"34c20580734c5e14856f4c2f34cf9ecd45015034702f82bfa04ac77931873218"}
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.597932    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" event={"ID":"fcca12ae-2952-47fb-b97c-6d913948ae44","Type":"ContainerStarted","Data":"f58812cb612307406e46c22b6aded9552ae2b24ddb1335bd69d86f74abe6bf49"}
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.597968    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" event={"ID":"fcca12ae-2952-47fb-b97c-6d913948ae44","Type":"ContainerStarted","Data":"96ad6a7180c753a59eae8da09aedca3106dba94542c6e2e9aed6f9da910bc0c1"}
Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.608840    4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod"
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" event={"ID":"9431a6ed-b6e1-4d41-bcd1-25a27b822a8c","Type":"ContainerStarted","Data":"e079b1deec43f7ce8e7a129c4eaa7116dd64215b6cc3aac68d0a7bdf4582e941"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.608881 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" event={"ID":"9431a6ed-b6e1-4d41-bcd1-25a27b822a8c","Type":"ContainerStarted","Data":"39e1f609c249281afce303b349eeb91f53742eba1e2eb0c18fc0f6346b8ca7a4"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.620834 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" event={"ID":"06673954-7d33-4d69-93ea-fd64814b74a1","Type":"ContainerStarted","Data":"1d4f5a08dc9bd4074d2a873da8971e4527240f06b4dfb46822beac355aec27d3"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.621609 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.626921 4814 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-db688 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" start-of-body= Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.626958 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" podUID="06673954-7d33-4d69-93ea-fd64814b74a1" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.634459 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" event={"ID":"0b9f4ced-dcb7-458a-a111-71d67169f45b","Type":"ContainerStarted","Data":"05b52777b6a4e9102ad9f23e75ee4370350b7b6c8e13cbaecaaf36a105b2cdbd"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.636140 4814 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-g9h6j container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body= Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.636191 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" podUID="0b9f4ced-dcb7-458a-a111-71d67169f45b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.644647 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:58 crc kubenswrapper[4814]: E0122 05:20:58.645865 4814 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:59.145841045 +0000 UTC m=+145.229329260 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.650869 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-b86l9" event={"ID":"d1fb380d-478d-4925-a8f3-bfe8ac8a40d6","Type":"ContainerStarted","Data":"4a068af84ac2081bf9d36b5153d7e377fad556ea252076f9f793d025aaeec631"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.655077 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wlpxt" podStartSLOduration=125.655064706 podStartE2EDuration="2m5.655064706s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:57.631720504 +0000 UTC m=+143.715208719" watchObservedRunningTime="2026-01-22 05:20:58.655064706 +0000 UTC m=+144.738552921" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.696950 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" podStartSLOduration=125.696928308 podStartE2EDuration="2m5.696928308s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:58.657002652 +0000 UTC m=+144.740490867" watchObservedRunningTime="2026-01-22 05:20:58.696928308 +0000 UTC m=+144.780416513" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.725167 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-g4spg" event={"ID":"ec7a47e8-3b1c-4864-b18e-2884db3ded5b","Type":"ContainerStarted","Data":"4ae8b8cacc2d6f570971128f3100a80eb80f62c5a9b6e1ee7e561d3c2433ec8e"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.725207 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-g4spg" event={"ID":"ec7a47e8-3b1c-4864-b18e-2884db3ded5b","Type":"ContainerStarted","Data":"d4a05e77bb67ebcd6a2050cad2997207114feb15c50233703a1d08a1e749c1e5"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.725813 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-g4spg" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.728231 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" event={"ID":"1956797e-8b6c-41a4-b467-cc8ba5a34466","Type":"ContainerStarted","Data":"145fe353230c24de12eb3c080cd22fd5b9aecb2350d3d769346ec2971afb5f50"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.728254 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" event={"ID":"1956797e-8b6c-41a4-b467-cc8ba5a34466","Type":"ContainerStarted","Data":"8b28c262e7cb2a4f38f70461a2fc60077837760dec60164faea2d8d6b1b0308b"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.732441 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" event={"ID":"f5552558-72a7-40d0-a265-450ce55c22ad","Type":"ContainerStarted","Data":"2990877060b05da71f0b386198aa881b5eba4613c7475fd99a6ccd2233ae39b4"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.749009 4814 generic.go:334] "Generic (PLEG): container finished" podID="b03e343f-bcd0-45f8-8ce0-962a6deb71db" containerID="0dce2a6d91faa10236b0be8de19910008cff368f7ece1f535676f4beed2b73d4" exitCode=0 Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.749196 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" event={"ID":"b03e343f-bcd0-45f8-8ce0-962a6deb71db","Type":"ContainerDied","Data":"0dce2a6d91faa10236b0be8de19910008cff368f7ece1f535676f4beed2b73d4"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.749512 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:58 crc kubenswrapper[4814]: E0122 05:20:58.750023 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:59.250001168 +0000 UTC m=+145.333489373 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.767786 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" event={"ID":"d46681e7-0d4a-4d3c-b7c2-cabe6a0b767a","Type":"ContainerStarted","Data":"3e59a8d0c9400fc114ec2eccb66fecdebd8be9e5c53121d91b6fc24b77ffa8f4"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.789726 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-b86l9" podStartSLOduration=124.789694378 podStartE2EDuration="2m4.789694378s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:58.715614699 +0000 UTC m=+144.799102914" watchObservedRunningTime="2026-01-22 05:20:58.789694378 +0000 UTC m=+144.873182593" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.791621 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2w4zq" event={"ID":"c76242d7-8bf3-47f3-9a71-443e13a63e41","Type":"ContainerStarted","Data":"35869094876e5808cfd3c47f40a76b6d7a302c280528c625a09412d05081cfb7"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.791667 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-2w4zq" event={"ID":"c76242d7-8bf3-47f3-9a71-443e13a63e41","Type":"ContainerStarted","Data":"ac99ba2b85da00bc0573d24ff6074354905ce2d6cb15a62d9b9f679b2087bd3d"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.815827 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" event={"ID":"7c4b5822-ec90-441f-b78f-dbb20d46d483","Type":"ContainerStarted","Data":"d93c9105ff7c2dfb7c3c4f149ff40f0381f5cac8040bde413cd0b0cc4b8f1c0d"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.815868 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" event={"ID":"7c4b5822-ec90-441f-b78f-dbb20d46d483","Type":"ContainerStarted","Data":"dac9e2688b59463ef40e68a2a424adea8e842d85a359940ed67807e25dcf77f4"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.825759 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-brx6v" podStartSLOduration=124.825745564 podStartE2EDuration="2m4.825745564s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:58.825690032 +0000 UTC m=+144.909178247" watchObservedRunningTime="2026-01-22 05:20:58.825745564 +0000 UTC m=+144.909233779" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.826221 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-fsdht" 
event={"ID":"63d01f3f-6487-4147-a862-70739c2c7961","Type":"ContainerStarted","Data":"b25a6340ab36977c3975638694392158161a8bcc50d5e621091bfb2386b4f478"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.826863 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688" podStartSLOduration=124.826856852 podStartE2EDuration="2m4.826856852s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:58.791359734 +0000 UTC m=+144.874847949" watchObservedRunningTime="2026-01-22 05:20:58.826856852 +0000 UTC m=+144.910345067" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.827066 4814 patch_prober.go:28] interesting pod/downloads-7954f5f757-fsdht container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.827304 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fsdht" podUID="63d01f3f-6487-4147-a862-70739c2c7961" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.844863 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" event={"ID":"acbaade5-e87d-4186-932a-9329053b6259","Type":"ContainerStarted","Data":"12517629475a294685b1185e2a6ca0270544cde0874def269dd71081e70b47e1"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.854028 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:58 crc kubenswrapper[4814]: E0122 05:20:58.855151 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:59.355131205 +0000 UTC m=+145.438619420 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.857071 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" event={"ID":"eb067e7f-ca27-4376-b468-7c7735c1336a","Type":"ContainerStarted","Data":"728bb281b23ae2a9bc4e1d2568d6f7bcdc1e7f9fb2abd65b7a3534b8cbe41566"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.891149 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm" event={"ID":"dec48317-4dfd-40c0-a60a-9ef7fdfaee68","Type":"ContainerStarted","Data":"098e226c153dfe2a8a7b2b27a13015082b9a26d8ede137387ac217c5eae48256"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.912602 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-t5hkb" podStartSLOduration=124.912584083 podStartE2EDuration="2m4.912584083s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:58.867541794 +0000 UTC m=+144.951030009" watchObservedRunningTime="2026-01-22 05:20:58.912584083 +0000 UTC m=+144.996072298" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.928746 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" event={"ID":"968a156c-de1c-4d13-bfad-6596916711d5","Type":"ContainerStarted","Data":"4e9c05b9b829c396e3aae6d85d3451a61c280a0ba42ddc21974a4143ff5dbf89"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.961418 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.964878 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" event={"ID":"a76b9e2d-cd6a-41be-bc29-614b9cf46751","Type":"ContainerStarted","Data":"a64e7cb5f163555f127a67daa49dd37d3ade2a58c6d2bebe3df115a4d5d2d16d"} Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.964916 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" event={"ID":"a76b9e2d-cd6a-41be-bc29-614b9cf46751","Type":"ContainerStarted","Data":"f519fa3b9db8b8eae0c7fe6441e6b8792f1f14e99aa974e613f85ffebabbc829"} Jan 22 05:20:58 crc kubenswrapper[4814]: E0122 05:20:58.980982 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-22 05:20:59.4809576 +0000 UTC m=+145.564445815 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.986163 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.987296 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-sshbr" podStartSLOduration=124.987280474 podStartE2EDuration="2m4.987280474s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:58.986542999 +0000 UTC m=+145.070031214" watchObservedRunningTime="2026-01-22 05:20:58.987280474 +0000 UTC m=+145.070768689" Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.997820 4814 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-bbjzl container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" start-of-body= Jan 22 05:20:58 crc kubenswrapper[4814]: I0122 05:20:58.997874 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" podUID="a76b9e2d-cd6a-41be-bc29-614b9cf46751" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": dial tcp 10.217.0.35:8443: connect: connection refused" Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.015296 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" event={"ID":"6bcd32d0-1f64-48d5-b9d9-5556573a6927","Type":"ContainerStarted","Data":"1553ce7724824b7b9cba447ce07d145301b05fdf9d00ca516326d80b8010c703"} Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.015337 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" event={"ID":"6bcd32d0-1f64-48d5-b9d9-5556573a6927","Type":"ContainerStarted","Data":"ca98f5629e437e00ea83187c7181b7cee0e469d4d34c9e71125d9fc7069768ad"} Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.030874 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tmm6k" event={"ID":"21498fe0-31d4-40b6-aa3a-c1cf4047c155","Type":"ContainerStarted","Data":"4495f6cbb4b8a80d932fea4b6cc128927304e76dab7e5323f604eea880ef7f96"} Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.039884 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-g4spg" podStartSLOduration=9.039870708 podStartE2EDuration="9.039870708s" podCreationTimestamp="2026-01-22 05:20:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:59.038962647 +0000 UTC m=+145.122450862" watchObservedRunningTime="2026-01-22 05:20:59.039870708 +0000 UTC m=+145.123358923" Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.080817 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.082579 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:59.582559728 +0000 UTC m=+145.666047943 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.120828 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5cczv" podStartSLOduration=125.120814478 podStartE2EDuration="2m5.120814478s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:59.114256567 +0000 UTC m=+145.197744782" watchObservedRunningTime="2026-01-22 05:20:59.120814478 +0000 UTC m=+145.204302693" Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.161515 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-2w4zq" podStartSLOduration=9.1615008 podStartE2EDuration="9.1615008s" podCreationTimestamp="2026-01-22 05:20:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:59.159060038 +0000 UTC m=+145.242548253" watchObservedRunningTime="2026-01-22 05:20:59.1615008 +0000 UTC m=+145.244989015" Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.170277 4814 patch_prober.go:28] interesting pod/router-default-5444994796-xd5fb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 05:20:59 crc kubenswrapper[4814]: [-]has-synced failed: reason withheld Jan 22 05:20:59 crc kubenswrapper[4814]: [+]process-running ok Jan 22 05:20:59 crc kubenswrapper[4814]: healthz check failed Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.170321 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xd5fb" podUID="2a31ffca-b39b-4c88-af05-b56eb149e248" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.183382 4814 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.184899 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:59.684884159 +0000 UTC m=+145.768372374 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.211724 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dn9k6" podStartSLOduration=125.211709935 podStartE2EDuration="2m5.211709935s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:59.211341502 +0000 UTC m=+145.294829717" watchObservedRunningTime="2026-01-22 05:20:59.211709935 +0000 UTC m=+145.295198150" Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.284092 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.284526 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:59.784508821 +0000 UTC m=+145.867997036 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.293104 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-bmlds" podStartSLOduration=125.293089099 podStartE2EDuration="2m5.293089099s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:59.290082419 +0000 UTC m=+145.373570634" watchObservedRunningTime="2026-01-22 05:20:59.293089099 +0000 UTC m=+145.376577314" Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.329192 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-kst8c" Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.388494 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.389050 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:59.889039236 +0000 UTC m=+145.972527451 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.392610 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" podStartSLOduration=125.392595967 podStartE2EDuration="2m5.392595967s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:59.34913428 +0000 UTC m=+145.432622485" watchObservedRunningTime="2026-01-22 05:20:59.392595967 +0000 UTC m=+145.476084182" Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.415754 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl" podStartSLOduration=125.415738047 podStartE2EDuration="2m5.415738047s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:59.396008392 +0000 UTC m=+145.479496597" watchObservedRunningTime="2026-01-22 05:20:59.415738047 +0000 UTC m=+145.499226262" Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.417023 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-mc5wq" podStartSLOduration=125.4170189 podStartE2EDuration="2m5.4170189s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:59.415570482 +0000 UTC m=+145.499058697" watchObservedRunningTime="2026-01-22 05:20:59.4170189 +0000 UTC m=+145.500507115" Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.489471 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.489828 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:20:59.989812266 +0000 UTC m=+146.073300481 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.490099 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.490503 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:20:59.990493869 +0000 UTC m=+146.073982084 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.524802 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-qkcnm" podStartSLOduration=125.524785776 podStartE2EDuration="2m5.524785776s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:20:59.48342461 +0000 UTC m=+145.566912825" watchObservedRunningTime="2026-01-22 05:20:59.524785776 +0000 UTC m=+145.608273991" Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.591661 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.592204 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.09218929 +0000 UTC m=+146.175677505 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.693204 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.693668 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.193657182 +0000 UTC m=+146.277145397 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.733472 4814 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-n6gln container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.733682 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln" podUID="f5552558-72a7-40d0-a265-450ce55c22ad" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.18:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.794861 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.795050 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.295023822 +0000 UTC m=+146.378512037 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.795347 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.795745 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.295730926 +0000 UTC m=+146.379219141 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.896705 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.897016 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.396990232 +0000 UTC m=+146.480478447 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.897132 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.897414 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.397403006 +0000 UTC m=+146.480891221 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.998461 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.998619 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.498593219 +0000 UTC m=+146.582081434 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:20:59 crc kubenswrapper[4814]: I0122 05:20:59.998962 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:20:59 crc kubenswrapper[4814]: E0122 05:20:59.999214 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.49920411 +0000 UTC m=+146.582692325 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.031023 4814 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-mhktc container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.11:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.031250 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" podUID="cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.11:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.058401 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" event={"ID":"b6998da9-cc02-4da8-b3d4-c02f32318b6f","Type":"ContainerStarted","Data":"82a75a2b8aee09746631f6916e805ff6d4519bcff260b8eafc1bb2fb56cfadd5"}
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.058442 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" event={"ID":"b6998da9-cc02-4da8-b3d4-c02f32318b6f","Type":"ContainerStarted","Data":"3df968b7a571bd3afbec0a4a8a25f4176eb2ea65b3cf6e659dcd12624a347616"}
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.064893 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" event={"ID":"b03e343f-bcd0-45f8-8ce0-962a6deb71db","Type":"ContainerStarted","Data":"09e9c8a5ba9633e8b96beac428718fba9f26599fcb31b66b56eeae7726b96eca"}
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.064975 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc"
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.080572 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-rq5tj" event={"ID":"805bbe6e-8f70-4666-9bb2-10ee278b883e","Type":"ContainerStarted","Data":"941d49d6b055ed924cf170848b049b4f3c010124439a523ff7b9eadbc556142a"}
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.084546 4814 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-g9h6j container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body=
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.084594 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" podUID="0b9f4ced-dcb7-458a-a111-71d67169f45b" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused"
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.084953 4814 patch_prober.go:28] interesting pod/downloads-7954f5f757-fsdht container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body=
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.084984 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fsdht" podUID="63d01f3f-6487-4147-a862-70739c2c7961" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused"
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.115237 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" podStartSLOduration=127.115222864 podStartE2EDuration="2m7.115222864s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:21:00.113958861 +0000 UTC m=+146.197447076" watchObservedRunningTime="2026-01-22 05:21:00.115222864 +0000 UTC m=+146.198711079"
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.118299 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:00 crc kubenswrapper[4814]: E0122 05:21:00.118766 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.618748603 +0000 UTC m=+146.702236818 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.132506 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc"
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.145789 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-db688"
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.160768 4814 patch_prober.go:28] interesting pod/router-default-5444994796-xd5fb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 22 05:21:00 crc kubenswrapper[4814]: [-]has-synced failed: reason withheld
Jan 22 05:21:00 crc kubenswrapper[4814]: [+]process-running ok
Jan 22 05:21:00 crc kubenswrapper[4814]: healthz check failed
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.160810 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xd5fb" podUID="2a31ffca-b39b-4c88-af05-b56eb149e248" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.171036 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-bbjzl"
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.193665 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-rq5tj" podStartSLOduration=126.19364938 podStartE2EDuration="2m6.19364938s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:21:00.146760018 +0000 UTC m=+146.230248233" watchObservedRunningTime="2026-01-22 05:21:00.19364938 +0000 UTC m=+146.277137595"
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.221072 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:00 crc kubenswrapper[4814]: E0122 05:21:00.224038 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.724026604 +0000 UTC m=+146.807514819 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.322069 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:00 crc kubenswrapper[4814]: E0122 05:21:00.322278 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.822252188 +0000 UTC m=+146.905740403 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.322438 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:00 crc kubenswrapper[4814]: E0122 05:21:00.322828 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.822814547 +0000 UTC m=+146.906302762 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.423154 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:00 crc kubenswrapper[4814]: E0122 05:21:00.423340 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.923306027 +0000 UTC m=+147.006794232 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.423600 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:00 crc kubenswrapper[4814]: E0122 05:21:00.423907 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:00.923899607 +0000 UTC m=+147.007387822 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.465881 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-n6gln"
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.524219 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:00 crc kubenswrapper[4814]: E0122 05:21:00.524426 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.024399348 +0000 UTC m=+147.107887563 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.626204 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:00 crc kubenswrapper[4814]: E0122 05:21:00.626614 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.126596985 +0000 UTC m=+147.210085200 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.727014 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:00 crc kubenswrapper[4814]: E0122 05:21:00.727333 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.227319303 +0000 UTC m=+147.310807518 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.828262 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:00 crc kubenswrapper[4814]: E0122 05:21:00.828535 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.328524817 +0000 UTC m=+147.412013032 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:00 crc kubenswrapper[4814]: I0122 05:21:00.929690 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:00 crc kubenswrapper[4814]: E0122 05:21:00.930078 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.430064253 +0000 UTC m=+147.513552468 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.031035 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:01 crc kubenswrapper[4814]: E0122 05:21:01.031453 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.531430972 +0000 UTC m=+147.614919187 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.087114 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" event={"ID":"b6998da9-cc02-4da8-b3d4-c02f32318b6f","Type":"ContainerStarted","Data":"94258e90c8038659222da28879080f92929ba3e4a59afa68848869d6e3b34852"}
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.087208 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" event={"ID":"b6998da9-cc02-4da8-b3d4-c02f32318b6f","Type":"ContainerStarted","Data":"b21e173c5add17c39ca97836b451aada9c9ac9f0a7a93f8ffda133d6383fd971"}
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.118275 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" podStartSLOduration=11.118248461 podStartE2EDuration="11.118248461s" podCreationTimestamp="2026-01-22 05:20:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:21:01.115412265 +0000 UTC m=+147.198900480" watchObservedRunningTime="2026-01-22 05:21:01.118248461 +0000 UTC m=+147.201736676"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.132186 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:01 crc kubenswrapper[4814]: E0122 05:21:01.132509 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.63248335 +0000 UTC m=+147.715971565 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.157598 4814 patch_prober.go:28] interesting pod/router-default-5444994796-xd5fb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 22 05:21:01 crc kubenswrapper[4814]: [-]has-synced failed: reason withheld
Jan 22 05:21:01 crc kubenswrapper[4814]: [+]process-running ok
Jan 22 05:21:01 crc kubenswrapper[4814]: healthz check failed
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.157918 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xd5fb" podUID="2a31ffca-b39b-4c88-af05-b56eb149e248" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.233986 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:01 crc kubenswrapper[4814]: E0122 05:21:01.235211 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.735179325 +0000 UTC m=+147.818667530 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.334517 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vwp7v"]
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.335271 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:01 crc kubenswrapper[4814]: E0122 05:21:01.335472 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.835442177 +0000 UTC m=+147.918930392 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.335497 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vwp7v"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.335568 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.335607 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.335673 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.335702 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:21:01 crc kubenswrapper[4814]: E0122 05:21:01.336067 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.836053928 +0000 UTC m=+147.919542143 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.336489 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.337640 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.344237 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.363392 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.363691 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vwp7v"]
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.370728 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.437328 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.437899 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.438038 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d371e5b-a490-441d-90c3-ead8479f81dc-catalog-content\") pod \"certified-operators-vwp7v\" (UID: \"8d371e5b-a490-441d-90c3-ead8479f81dc\") " pod="openshift-marketplace/certified-operators-vwp7v"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.438175 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d371e5b-a490-441d-90c3-ead8479f81dc-utilities\") pod \"certified-operators-vwp7v\" (UID: \"8d371e5b-a490-441d-90c3-ead8479f81dc\") " pod="openshift-marketplace/certified-operators-vwp7v"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.438430 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ps6zm\" (UniqueName: \"kubernetes.io/projected/8d371e5b-a490-441d-90c3-ead8479f81dc-kube-api-access-ps6zm\") pod \"certified-operators-vwp7v\" (UID: \"8d371e5b-a490-441d-90c3-ead8479f81dc\") " pod="openshift-marketplace/certified-operators-vwp7v"
Jan 22 05:21:01 crc kubenswrapper[4814]: E0122 05:21:01.438713 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:01.93867061 +0000 UTC m=+148.022158825 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.446853 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.527298 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b84wr"]
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.528161 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b84wr"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.532883 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.539262 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ps6zm\" (UniqueName: \"kubernetes.io/projected/8d371e5b-a490-441d-90c3-ead8479f81dc-kube-api-access-ps6zm\") pod \"certified-operators-vwp7v\" (UID: \"8d371e5b-a490-441d-90c3-ead8479f81dc\") " pod="openshift-marketplace/certified-operators-vwp7v"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.539457 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d371e5b-a490-441d-90c3-ead8479f81dc-catalog-content\") pod \"certified-operators-vwp7v\" (UID: \"8d371e5b-a490-441d-90c3-ead8479f81dc\") " pod="openshift-marketplace/certified-operators-vwp7v"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.539597 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d371e5b-a490-441d-90c3-ead8479f81dc-utilities\") pod \"certified-operators-vwp7v\" (UID: \"8d371e5b-a490-441d-90c3-ead8479f81dc\") " pod="openshift-marketplace/certified-operators-vwp7v"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.539707 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.539889 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d371e5b-a490-441d-90c3-ead8479f81dc-catalog-content\") pod \"certified-operators-vwp7v\" (UID: \"8d371e5b-a490-441d-90c3-ead8479f81dc\") " pod="openshift-marketplace/certified-operators-vwp7v"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.539972 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d371e5b-a490-441d-90c3-ead8479f81dc-utilities\") pod \"certified-operators-vwp7v\" (UID: \"8d371e5b-a490-441d-90c3-ead8479f81dc\") " pod="openshift-marketplace/certified-operators-vwp7v"
Jan 22 05:21:01 crc kubenswrapper[4814]: E0122 05:21:01.540155 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:02.040133313 +0000 UTC m=+148.123621558 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.553958 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b84wr"]
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.588835 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.589654 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.602085 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ps6zm\" (UniqueName: \"kubernetes.io/projected/8d371e5b-a490-441d-90c3-ead8479f81dc-kube-api-access-ps6zm\") pod \"certified-operators-vwp7v\" (UID: \"8d371e5b-a490-441d-90c3-ead8479f81dc\") " pod="openshift-marketplace/certified-operators-vwp7v"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.644153 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:01 crc kubenswrapper[4814]: E0122 05:21:01.644281 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:02.144252435 +0000 UTC m=+148.227740650 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.644379 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfc03373-04f8-49da-a3d6-5428a0324db5-catalog-content\") pod \"community-operators-b84wr\" (UID: \"dfc03373-04f8-49da-a3d6-5428a0324db5\") " pod="openshift-marketplace/community-operators-b84wr"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.644406 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9t2z\" (UniqueName: \"kubernetes.io/projected/dfc03373-04f8-49da-a3d6-5428a0324db5-kube-api-access-d9t2z\") pod \"community-operators-b84wr\" (UID: \"dfc03373-04f8-49da-a3d6-5428a0324db5\") " pod="openshift-marketplace/community-operators-b84wr"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.644442 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfc03373-04f8-49da-a3d6-5428a0324db5-utilities\") pod \"community-operators-b84wr\" (UID: \"dfc03373-04f8-49da-a3d6-5428a0324db5\") " pod="openshift-marketplace/community-operators-b84wr"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.644497 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:01 crc kubenswrapper[4814]: E0122 05:21:01.644786 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:02.144772632 +0000 UTC m=+148.228260847 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.668834 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vwp7v"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.699082 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-hvr97"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.699133 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-hvr97"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.733973 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-hvr97"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.736052 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5njqz"]
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.736920 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.752024 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.752259 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfc03373-04f8-49da-a3d6-5428a0324db5-catalog-content\") pod \"community-operators-b84wr\" (UID: \"dfc03373-04f8-49da-a3d6-5428a0324db5\") " pod="openshift-marketplace/community-operators-b84wr"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.752287 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9t2z\" (UniqueName: \"kubernetes.io/projected/dfc03373-04f8-49da-a3d6-5428a0324db5-kube-api-access-d9t2z\") pod \"community-operators-b84wr\" (UID: \"dfc03373-04f8-49da-a3d6-5428a0324db5\") " pod="openshift-marketplace/community-operators-b84wr"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.752318 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfc03373-04f8-49da-a3d6-5428a0324db5-utilities\") pod \"community-operators-b84wr\" (UID: \"dfc03373-04f8-49da-a3d6-5428a0324db5\") " pod="openshift-marketplace/community-operators-b84wr"
Jan 22 05:21:01 crc kubenswrapper[4814]: E0122 05:21:01.752584 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:02.252565939 +0000 UTC m=+148.336054154 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.752745 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfc03373-04f8-49da-a3d6-5428a0324db5-utilities\") pod \"community-operators-b84wr\" (UID: \"dfc03373-04f8-49da-a3d6-5428a0324db5\") " pod="openshift-marketplace/community-operators-b84wr"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.753119 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfc03373-04f8-49da-a3d6-5428a0324db5-catalog-content\") pod \"community-operators-b84wr\" (UID: \"dfc03373-04f8-49da-a3d6-5428a0324db5\") " pod="openshift-marketplace/community-operators-b84wr"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.765239 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5njqz"]
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.785306 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9t2z\" (UniqueName: \"kubernetes.io/projected/dfc03373-04f8-49da-a3d6-5428a0324db5-kube-api-access-d9t2z\") pod \"community-operators-b84wr\" (UID: \"dfc03373-04f8-49da-a3d6-5428a0324db5\") " pod="openshift-marketplace/community-operators-b84wr"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.814171 4814 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.840272 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b84wr"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.853023 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-utilities\") pod \"certified-operators-5njqz\" (UID: \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\") " pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.853107 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.853143 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-catalog-content\") pod \"certified-operators-5njqz\" (UID: \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\") " pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.853185 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkwss\" (UniqueName: \"kubernetes.io/projected/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-kube-api-access-mkwss\") pod \"certified-operators-5njqz\" (UID: \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\") " pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:01 crc kubenswrapper[4814]: E0122 05:21:01.854696 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:02.354684923 +0000 UTC m=+148.438173138 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.930486 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sgjff"]
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.933204 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.952557 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sgjff"]
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.955187 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.955384 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-catalog-content\") pod \"certified-operators-5njqz\" (UID: \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\") " pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.955415 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkwss\" (UniqueName: \"kubernetes.io/projected/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-kube-api-access-mkwss\") pod \"certified-operators-5njqz\" (UID: \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\") " pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.955473 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-utilities\") pod \"certified-operators-5njqz\" (UID: \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\") " pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.956127 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-utilities\") pod \"certified-operators-5njqz\" (UID: \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\") " pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:01 crc kubenswrapper[4814]: E0122 05:21:01.956199 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:02.456184688 +0000 UTC m=+148.539672903 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.956397 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-catalog-content\") pod \"certified-operators-5njqz\" (UID: \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\") " pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:01 crc kubenswrapper[4814]: I0122 05:21:01.988196 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkwss\" (UniqueName: \"kubernetes.io/projected/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-kube-api-access-mkwss\") pod \"certified-operators-5njqz\" (UID: \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\") " pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.060232 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htkgt\" (UniqueName: \"kubernetes.io/projected/66f44c43-4c03-4af5-bba9-849b3d9b8724-kube-api-access-htkgt\") pod \"community-operators-sgjff\" (UID: \"66f44c43-4c03-4af5-bba9-849b3d9b8724\") " pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.060280 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66f44c43-4c03-4af5-bba9-849b3d9b8724-catalog-content\") pod \"community-operators-sgjff\" (UID: \"66f44c43-4c03-4af5-bba9-849b3d9b8724\") " pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.060303 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.060323 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66f44c43-4c03-4af5-bba9-849b3d9b8724-utilities\") pod \"community-operators-sgjff\" (UID: \"66f44c43-4c03-4af5-bba9-849b3d9b8724\") " pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:02 crc kubenswrapper[4814]: E0122 05:21:02.060580 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:02.560569259 +0000 UTC m=+148.644057474 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.105872 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.150614 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"73530c1f2ed759f0efdf3ec5221fe23f5cbdee5be4e724caff294f375608f37b"}
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.161219 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.161570 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htkgt\" (UniqueName: \"kubernetes.io/projected/66f44c43-4c03-4af5-bba9-849b3d9b8724-kube-api-access-htkgt\") pod \"community-operators-sgjff\" (UID: \"66f44c43-4c03-4af5-bba9-849b3d9b8724\") " pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.161609 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66f44c43-4c03-4af5-bba9-849b3d9b8724-catalog-content\") pod \"community-operators-sgjff\" (UID: \"66f44c43-4c03-4af5-bba9-849b3d9b8724\") " pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.161651 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66f44c43-4c03-4af5-bba9-849b3d9b8724-utilities\") pod \"community-operators-sgjff\" (UID: \"66f44c43-4c03-4af5-bba9-849b3d9b8724\") " pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.161996 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66f44c43-4c03-4af5-bba9-849b3d9b8724-utilities\") pod \"community-operators-sgjff\" (UID: \"66f44c43-4c03-4af5-bba9-849b3d9b8724\") " pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:02 crc kubenswrapper[4814]: E0122 05:21:02.162036 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:02.662019192 +0000 UTC m=+148.745507407 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.162214 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66f44c43-4c03-4af5-bba9-849b3d9b8724-catalog-content\") pod \"community-operators-sgjff\" (UID: \"66f44c43-4c03-4af5-bba9-849b3d9b8724\") " pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.171941 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-hvr97"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.175416 4814 patch_prober.go:28] interesting pod/router-default-5444994796-xd5fb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 22 05:21:02 crc kubenswrapper[4814]: [-]has-synced failed: reason withheld
Jan 22 05:21:02 crc kubenswrapper[4814]: [+]process-running ok
Jan 22 05:21:02 crc kubenswrapper[4814]: healthz check failed
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.175455 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xd5fb" podUID="2a31ffca-b39b-4c88-af05-b56eb149e248" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.194344 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vwp7v"]
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.229296 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htkgt\" (UniqueName: \"kubernetes.io/projected/66f44c43-4c03-4af5-bba9-849b3d9b8724-kube-api-access-htkgt\") pod \"community-operators-sgjff\" (UID: \"66f44c43-4c03-4af5-bba9-849b3d9b8724\") " pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.262911 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.264864 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:02 crc kubenswrapper[4814]: E0122 05:21:02.266005 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:02.76599131 +0000 UTC m=+148.849479525 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.363514 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:02 crc kubenswrapper[4814]: E0122 05:21:02.366341 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-22 05:21:02.866323984 +0000 UTC m=+148.949812199 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.423524 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b84wr"]
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.463133 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.463701 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.468214 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.468454 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.469334 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2"
Jan 22 05:21:02 crc kubenswrapper[4814]: E0122 05:21:02.469606 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-22 05:21:02.969595938 +0000 UTC m=+149.053084153 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kt2c2" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.475969 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.518122 4814 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-22T05:21:01.814188738Z","Handler":null,"Name":""}
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.520944 4814 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.520965 4814 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.572984 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.573201 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/75362bde-f94a-48d2-a36c-75c03967c08e-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"75362bde-f94a-48d2-a36c-75c03967c08e\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.573230 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/75362bde-f94a-48d2-a36c-75c03967c08e-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"75362bde-f94a-48d2-a36c-75c03967c08e\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.675611 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/75362bde-f94a-48d2-a36c-75c03967c08e-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"75362bde-f94a-48d2-a36c-75c03967c08e\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.675678 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/75362bde-f94a-48d2-a36c-75c03967c08e-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"75362bde-f94a-48d2-a36c-75c03967c08e\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.676043 4814 operation_generator.go:637]
"MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/75362bde-f94a-48d2-a36c-75c03967c08e-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"75362bde-f94a-48d2-a36c-75c03967c08e\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.716498 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/75362bde-f94a-48d2-a36c-75c03967c08e-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"75362bde-f94a-48d2-a36c-75c03967c08e\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.740662 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-frrqc" Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.791660 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.813913 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.837152 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5njqz"] Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.885803 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.890973 4814 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.891014 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.952503 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kt2c2\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:21:02 crc kubenswrapper[4814]: I0122 05:21:02.996486 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sgjff"] Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.078465 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.090357 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.102674 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.103257 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.111937 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.112289 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.112799 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.112892 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.113513 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.115091 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.116937 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.118659 4814 patch_prober.go:28] interesting pod/console-f9d7485db-jnnrg container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.118718 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-jnnrg" podUID="b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.133696 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.160301 4814 patch_prober.go:28] interesting pod/router-default-5444994796-xd5fb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 05:21:03 crc kubenswrapper[4814]: [-]has-synced failed: reason withheld Jan 22 05:21:03 crc kubenswrapper[4814]: [+]process-running ok Jan 22 05:21:03 crc kubenswrapper[4814]: healthz check failed Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.160342 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xd5fb" podUID="2a31ffca-b39b-4c88-af05-b56eb149e248" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.173342 4814 generic.go:334] "Generic (PLEG): container finished" podID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" containerID="ee697e25dbb0cd4f02234e9302a6ca548f9e8fc9c445f182362e9fa699256c55" exitCode=0 Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.173426 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5njqz" 
event={"ID":"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f","Type":"ContainerDied","Data":"ee697e25dbb0cd4f02234e9302a6ca548f9e8fc9c445f182362e9fa699256c55"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.173452 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5njqz" event={"ID":"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f","Type":"ContainerStarted","Data":"efb50102ef7a6a0b1bacb1b63adf04e254c0f98ee588f7f1c033a06b04bf6705"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.174801 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sgjff" event={"ID":"66f44c43-4c03-4af5-bba9-849b3d9b8724","Type":"ContainerStarted","Data":"8535d532da3376ec3c40b7171f345855ce3f4d30930de54fcee6c580720169ba"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.175955 4814 generic.go:334] "Generic (PLEG): container finished" podID="8d371e5b-a490-441d-90c3-ead8479f81dc" containerID="05da5e92287006c251e624dc3c57aba82ffac19ebcac2e0f07b6532c61308b33" exitCode=0 Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.176006 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwp7v" event={"ID":"8d371e5b-a490-441d-90c3-ead8479f81dc","Type":"ContainerDied","Data":"05da5e92287006c251e624dc3c57aba82ffac19ebcac2e0f07b6532c61308b33"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.176022 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwp7v" event={"ID":"8d371e5b-a490-441d-90c3-ead8479f81dc","Type":"ContainerStarted","Data":"3ad88bf61ea8b8beda837318abed0b1ae109c5242e69b90ca970e7b99b4772d0"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.177413 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f728ef6dec0426f5778420c39649ca3ee804fc510e0bdc8763dc2dc2c5d16cc3"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.177438 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"858ba071bf3ed01914f7219cfa864c54bdd6205f5a1f0a8678ded904dd41a197"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.177863 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.181246 4814 generic.go:334] "Generic (PLEG): container finished" podID="dfc03373-04f8-49da-a3d6-5428a0324db5" containerID="86bccf11918c25bf075a1ad7a8d567befe131a613acf146db0d6a4ff70194961" exitCode=0 Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.181298 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b84wr" event={"ID":"dfc03373-04f8-49da-a3d6-5428a0324db5","Type":"ContainerDied","Data":"86bccf11918c25bf075a1ad7a8d567befe131a613acf146db0d6a4ff70194961"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.181331 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b84wr" event={"ID":"dfc03373-04f8-49da-a3d6-5428a0324db5","Type":"ContainerStarted","Data":"b4820c45a4fdab458d9335112a83cbc76229d5a7163425777666302600f73686"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.189062 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.189129 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.189614 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"74235ccf895c6f229bc0c027d8dcfd54815d3bb0c9aa0ecb2919ed52b191dcfb"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.194040 4814 generic.go:334] "Generic (PLEG): container finished" podID="fcca12ae-2952-47fb-b97c-6d913948ae44" containerID="f58812cb612307406e46c22b6aded9552ae2b24ddb1335bd69d86f74abe6bf49" exitCode=0 Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.194221 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" event={"ID":"fcca12ae-2952-47fb-b97c-6d913948ae44","Type":"ContainerDied","Data":"f58812cb612307406e46c22b6aded9552ae2b24ddb1335bd69d86f74abe6bf49"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.196923 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.201388 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"75362bde-f94a-48d2-a36c-75c03967c08e","Type":"ContainerStarted","Data":"e1e034eb8d5b17872f7ade1df081858f6c60e218a41647ab79cfdc712b26c3e1"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.205551 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"7b1d8e3fa8ad6d6d7bf697b6408d61ab81d5d42512e0178e90584dfaecb1c9f1"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.205593 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"7788998a0d0d8085443698b8214ccc6fff17e7bf05d8d464735216a80f66af5a"} Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.215280 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-xmp5c" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.290278 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.290569 4814 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.292829 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.339396 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.381026 4814 patch_prober.go:28] interesting pod/downloads-7954f5f757-fsdht container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.381073 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-fsdht" podUID="63d01f3f-6487-4147-a862-70739c2c7961" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.381310 4814 patch_prober.go:28] interesting pod/downloads-7954f5f757-fsdht container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.381416 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-fsdht" podUID="63d01f3f-6487-4147-a862-70739c2c7961" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.472960 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.532738 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sl7ng"] Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.533685 4814 util.go:30] "No sandbox for pod can be found. 
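[Annotation] The probe failures above all follow the same shape: kubelet issues an HTTP GET against the container's endpoint, and either a refused connection (the container is not listening yet) or a non-2xx/3xx status counts as a failure. A minimal sketch of that check, not kubelet's prober; the URL is the one from the log and will of course fail unless something is listening there:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // probe performs a single HTTP check: transport errors such as
    // "connect: connection refused" and statuses outside 200-399 fail.
    func probe(url string) (ok bool, detail string) {
        client := &http.Client{Timeout: 1 * time.Second}
        resp, err := client.Get(url)
        if err != nil {
            return false, fmt.Sprintf("Get %q: %v", url, err)
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return false, fmt.Sprintf("HTTP probe failed with statuscode: %d", resp.StatusCode)
        }
        return true, ""
    }

    func main() {
        if ok, detail := probe("http://10.217.0.10:8080/"); !ok {
            fmt.Println("Probe failed:", detail)
        }
    }

The distinction in the log is what kubelet does with the result: a failed startup probe keeps the container in its startup window (the router entries), while failed readiness and liveness probes on a started container mark it unready or restart it (the downloads entries).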
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.536065 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.566671 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sl7ng"] Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.598204 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33942fc4-20af-4f7f-a3db-e04a2356e2db-catalog-content\") pod \"redhat-marketplace-sl7ng\" (UID: \"33942fc4-20af-4f7f-a3db-e04a2356e2db\") " pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.599384 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33942fc4-20af-4f7f-a3db-e04a2356e2db-utilities\") pod \"redhat-marketplace-sl7ng\" (UID: \"33942fc4-20af-4f7f-a3db-e04a2356e2db\") " pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.599427 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5jsl\" (UniqueName: \"kubernetes.io/projected/33942fc4-20af-4f7f-a3db-e04a2356e2db-kube-api-access-v5jsl\") pod \"redhat-marketplace-sl7ng\" (UID: \"33942fc4-20af-4f7f-a3db-e04a2356e2db\") " pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.700389 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33942fc4-20af-4f7f-a3db-e04a2356e2db-catalog-content\") pod \"redhat-marketplace-sl7ng\" (UID: \"33942fc4-20af-4f7f-a3db-e04a2356e2db\") " pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.700433 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33942fc4-20af-4f7f-a3db-e04a2356e2db-utilities\") pod \"redhat-marketplace-sl7ng\" (UID: \"33942fc4-20af-4f7f-a3db-e04a2356e2db\") " pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.700463 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5jsl\" (UniqueName: \"kubernetes.io/projected/33942fc4-20af-4f7f-a3db-e04a2356e2db-kube-api-access-v5jsl\") pod \"redhat-marketplace-sl7ng\" (UID: \"33942fc4-20af-4f7f-a3db-e04a2356e2db\") " pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.701217 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33942fc4-20af-4f7f-a3db-e04a2356e2db-catalog-content\") pod \"redhat-marketplace-sl7ng\" (UID: \"33942fc4-20af-4f7f-a3db-e04a2356e2db\") " pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.701337 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33942fc4-20af-4f7f-a3db-e04a2356e2db-utilities\") pod \"redhat-marketplace-sl7ng\" (UID: 
\"33942fc4-20af-4f7f-a3db-e04a2356e2db\") " pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.717969 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5jsl\" (UniqueName: \"kubernetes.io/projected/33942fc4-20af-4f7f-a3db-e04a2356e2db-kube-api-access-v5jsl\") pod \"redhat-marketplace-sl7ng\" (UID: \"33942fc4-20af-4f7f-a3db-e04a2356e2db\") " pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.772044 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kt2c2"] Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.820619 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.848548 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.888949 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.935664 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8qcxd"] Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.936664 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:21:03 crc kubenswrapper[4814]: I0122 05:21:03.946954 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8qcxd"] Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.006692 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b42b8af-781f-4a40-bc43-658de04b12a6-catalog-content\") pod \"redhat-marketplace-8qcxd\" (UID: \"2b42b8af-781f-4a40-bc43-658de04b12a6\") " pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.006800 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pgm6\" (UniqueName: \"kubernetes.io/projected/2b42b8af-781f-4a40-bc43-658de04b12a6-kube-api-access-5pgm6\") pod \"redhat-marketplace-8qcxd\" (UID: \"2b42b8af-781f-4a40-bc43-658de04b12a6\") " pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.006845 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b42b8af-781f-4a40-bc43-658de04b12a6-utilities\") pod \"redhat-marketplace-8qcxd\" (UID: \"2b42b8af-781f-4a40-bc43-658de04b12a6\") " pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.110306 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pgm6\" (UniqueName: \"kubernetes.io/projected/2b42b8af-781f-4a40-bc43-658de04b12a6-kube-api-access-5pgm6\") pod \"redhat-marketplace-8qcxd\" (UID: \"2b42b8af-781f-4a40-bc43-658de04b12a6\") " pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.110352 4814 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b42b8af-781f-4a40-bc43-658de04b12a6-utilities\") pod \"redhat-marketplace-8qcxd\" (UID: \"2b42b8af-781f-4a40-bc43-658de04b12a6\") " pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.110400 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b42b8af-781f-4a40-bc43-658de04b12a6-catalog-content\") pod \"redhat-marketplace-8qcxd\" (UID: \"2b42b8af-781f-4a40-bc43-658de04b12a6\") " pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.110911 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b42b8af-781f-4a40-bc43-658de04b12a6-catalog-content\") pod \"redhat-marketplace-8qcxd\" (UID: \"2b42b8af-781f-4a40-bc43-658de04b12a6\") " pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.114456 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b42b8af-781f-4a40-bc43-658de04b12a6-utilities\") pod \"redhat-marketplace-8qcxd\" (UID: \"2b42b8af-781f-4a40-bc43-658de04b12a6\") " pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.142584 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pgm6\" (UniqueName: \"kubernetes.io/projected/2b42b8af-781f-4a40-bc43-658de04b12a6-kube-api-access-5pgm6\") pod \"redhat-marketplace-8qcxd\" (UID: \"2b42b8af-781f-4a40-bc43-658de04b12a6\") " pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.152678 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.159340 4814 patch_prober.go:28] interesting pod/router-default-5444994796-xd5fb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 22 05:21:04 crc kubenswrapper[4814]: [-]has-synced failed: reason withheld Jan 22 05:21:04 crc kubenswrapper[4814]: [+]process-running ok Jan 22 05:21:04 crc kubenswrapper[4814]: healthz check failed Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.159407 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xd5fb" podUID="2a31ffca-b39b-4c88-af05-b56eb149e248" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.184513 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sl7ng"] Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.218943 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"75362bde-f94a-48d2-a36c-75c03967c08e","Type":"ContainerStarted","Data":"538b42f42fd7c2bca9d91a41d5c5dfb1870b41b3cc96fae3703909abe8ea62cd"} Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.221417 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" 
event={"ID":"a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177","Type":"ContainerStarted","Data":"8d082554a43eb2951f9f0e20b15f3a7cb5698fafe2f192abb0425b3290181aba"} Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.230167 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" event={"ID":"29aae90f-3db5-4e31-a13e-35049f8ff2de","Type":"ContainerStarted","Data":"3b7ffe7b7496385f394bc2049cf61899beef070aefbf52307b0f132997343822"} Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.230207 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" event={"ID":"29aae90f-3db5-4e31-a13e-35049f8ff2de","Type":"ContainerStarted","Data":"332f5aeed2d509187bcae51bef3684639d6522adf6fc56adedca1b486a77b1a9"} Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.230330 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.233928 4814 generic.go:334] "Generic (PLEG): container finished" podID="66f44c43-4c03-4af5-bba9-849b3d9b8724" containerID="42043afecbf439836499037a01a24e562ea6e2b11b73d3324bfdd27509196617" exitCode=0 Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.234855 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sgjff" event={"ID":"66f44c43-4c03-4af5-bba9-849b3d9b8724","Type":"ContainerDied","Data":"42043afecbf439836499037a01a24e562ea6e2b11b73d3324bfdd27509196617"} Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.246023 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.246004674 podStartE2EDuration="2.246004674s" podCreationTimestamp="2026-01-22 05:21:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:21:04.2381833 +0000 UTC m=+150.321671505" watchObservedRunningTime="2026-01-22 05:21:04.246004674 +0000 UTC m=+150.329492889" Jan 22 05:21:04 crc kubenswrapper[4814]: W0122 05:21:04.252553 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33942fc4_20af_4f7f_a3db_e04a2356e2db.slice/crio-669736cf1229424b42a57a4d5f8ee45186a2df08fca736a8a373458b4c41c947 WatchSource:0}: Error finding container 669736cf1229424b42a57a4d5f8ee45186a2df08fca736a8a373458b4c41c947: Status 404 returned error can't find the container with id 669736cf1229424b42a57a4d5f8ee45186a2df08fca736a8a373458b4c41c947 Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.284439 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" podStartSLOduration=130.28442074 podStartE2EDuration="2m10.28442074s" podCreationTimestamp="2026-01-22 05:18:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:21:04.26131335 +0000 UTC m=+150.344801565" watchObservedRunningTime="2026-01-22 05:21:04.28442074 +0000 UTC m=+150.367908955" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.286680 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.395599 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.529486 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-t7mps"] Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.530786 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.544253 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.546827 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t7mps"] Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.630314 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlpgt\" (UniqueName: \"kubernetes.io/projected/24689da3-97aa-4d34-ad33-4fdb8950e6a9-kube-api-access-nlpgt\") pod \"redhat-operators-t7mps\" (UID: \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\") " pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.630397 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24689da3-97aa-4d34-ad33-4fdb8950e6a9-catalog-content\") pod \"redhat-operators-t7mps\" (UID: \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\") " pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.630451 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24689da3-97aa-4d34-ad33-4fdb8950e6a9-utilities\") pod \"redhat-operators-t7mps\" (UID: \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\") " pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.731418 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlpgt\" (UniqueName: \"kubernetes.io/projected/24689da3-97aa-4d34-ad33-4fdb8950e6a9-kube-api-access-nlpgt\") pod \"redhat-operators-t7mps\" (UID: \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\") " pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.731481 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24689da3-97aa-4d34-ad33-4fdb8950e6a9-catalog-content\") pod \"redhat-operators-t7mps\" (UID: \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\") " pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.731526 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24689da3-97aa-4d34-ad33-4fdb8950e6a9-utilities\") pod \"redhat-operators-t7mps\" (UID: \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\") " pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.731880 4814 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24689da3-97aa-4d34-ad33-4fdb8950e6a9-utilities\") pod \"redhat-operators-t7mps\" (UID: \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\") " pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.732086 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24689da3-97aa-4d34-ad33-4fdb8950e6a9-catalog-content\") pod \"redhat-operators-t7mps\" (UID: \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\") " pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.783112 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlpgt\" (UniqueName: \"kubernetes.io/projected/24689da3-97aa-4d34-ad33-4fdb8950e6a9-kube-api-access-nlpgt\") pod \"redhat-operators-t7mps\" (UID: \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\") " pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.862223 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.882729 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.932483 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8qcxd"] Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.947077 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7tr77"] Jan 22 05:21:04 crc kubenswrapper[4814]: E0122 05:21:04.947379 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcca12ae-2952-47fb-b97c-6d913948ae44" containerName="collect-profiles" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.947391 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcca12ae-2952-47fb-b97c-6d913948ae44" containerName="collect-profiles" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.947542 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcca12ae-2952-47fb-b97c-6d913948ae44" containerName="collect-profiles" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.948005 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fcca12ae-2952-47fb-b97c-6d913948ae44-config-volume\") pod \"fcca12ae-2952-47fb-b97c-6d913948ae44\" (UID: \"fcca12ae-2952-47fb-b97c-6d913948ae44\") " Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.948068 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqz9q\" (UniqueName: \"kubernetes.io/projected/fcca12ae-2952-47fb-b97c-6d913948ae44-kube-api-access-sqz9q\") pod \"fcca12ae-2952-47fb-b97c-6d913948ae44\" (UID: \"fcca12ae-2952-47fb-b97c-6d913948ae44\") " Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.948112 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fcca12ae-2952-47fb-b97c-6d913948ae44-secret-volume\") pod \"fcca12ae-2952-47fb-b97c-6d913948ae44\" (UID: \"fcca12ae-2952-47fb-b97c-6d913948ae44\") " Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.948760 4814 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.949835 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fcca12ae-2952-47fb-b97c-6d913948ae44-config-volume" (OuterVolumeSpecName: "config-volume") pod "fcca12ae-2952-47fb-b97c-6d913948ae44" (UID: "fcca12ae-2952-47fb-b97c-6d913948ae44"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.959104 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcca12ae-2952-47fb-b97c-6d913948ae44-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "fcca12ae-2952-47fb-b97c-6d913948ae44" (UID: "fcca12ae-2952-47fb-b97c-6d913948ae44"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.963938 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcca12ae-2952-47fb-b97c-6d913948ae44-kube-api-access-sqz9q" (OuterVolumeSpecName: "kube-api-access-sqz9q") pod "fcca12ae-2952-47fb-b97c-6d913948ae44" (UID: "fcca12ae-2952-47fb-b97c-6d913948ae44"). InnerVolumeSpecName "kube-api-access-sqz9q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:21:04 crc kubenswrapper[4814]: I0122 05:21:04.969177 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7tr77"] Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.048886 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82fb31a6-1417-4ef9-a19b-1e877ce55477-catalog-content\") pod \"redhat-operators-7tr77\" (UID: \"82fb31a6-1417-4ef9-a19b-1e877ce55477\") " pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.048922 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82fb31a6-1417-4ef9-a19b-1e877ce55477-utilities\") pod \"redhat-operators-7tr77\" (UID: \"82fb31a6-1417-4ef9-a19b-1e877ce55477\") " pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.048941 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4xtm\" (UniqueName: \"kubernetes.io/projected/82fb31a6-1417-4ef9-a19b-1e877ce55477-kube-api-access-t4xtm\") pod \"redhat-operators-7tr77\" (UID: \"82fb31a6-1417-4ef9-a19b-1e877ce55477\") " pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.049032 4814 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fcca12ae-2952-47fb-b97c-6d913948ae44-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.049044 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqz9q\" (UniqueName: \"kubernetes.io/projected/fcca12ae-2952-47fb-b97c-6d913948ae44-kube-api-access-sqz9q\") on node \"crc\" DevicePath \"\"" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.049054 4814 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/fcca12ae-2952-47fb-b97c-6d913948ae44-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.149924 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82fb31a6-1417-4ef9-a19b-1e877ce55477-catalog-content\") pod \"redhat-operators-7tr77\" (UID: \"82fb31a6-1417-4ef9-a19b-1e877ce55477\") " pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.149954 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82fb31a6-1417-4ef9-a19b-1e877ce55477-utilities\") pod \"redhat-operators-7tr77\" (UID: \"82fb31a6-1417-4ef9-a19b-1e877ce55477\") " pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.149975 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4xtm\" (UniqueName: \"kubernetes.io/projected/82fb31a6-1417-4ef9-a19b-1e877ce55477-kube-api-access-t4xtm\") pod \"redhat-operators-7tr77\" (UID: \"82fb31a6-1417-4ef9-a19b-1e877ce55477\") " pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.150811 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82fb31a6-1417-4ef9-a19b-1e877ce55477-catalog-content\") pod \"redhat-operators-7tr77\" (UID: \"82fb31a6-1417-4ef9-a19b-1e877ce55477\") " pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.151020 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82fb31a6-1417-4ef9-a19b-1e877ce55477-utilities\") pod \"redhat-operators-7tr77\" (UID: \"82fb31a6-1417-4ef9-a19b-1e877ce55477\") " pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.165364 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.169157 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-xd5fb" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.173873 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4xtm\" (UniqueName: \"kubernetes.io/projected/82fb31a6-1417-4ef9-a19b-1e877ce55477-kube-api-access-t4xtm\") pod \"redhat-operators-7tr77\" (UID: \"82fb31a6-1417-4ef9-a19b-1e877ce55477\") " pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.267462 4814 generic.go:334] "Generic (PLEG): container finished" podID="75362bde-f94a-48d2-a36c-75c03967c08e" containerID="538b42f42fd7c2bca9d91a41d5c5dfb1870b41b3cc96fae3703909abe8ea62cd" exitCode=0 Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.267524 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"75362bde-f94a-48d2-a36c-75c03967c08e","Type":"ContainerDied","Data":"538b42f42fd7c2bca9d91a41d5c5dfb1870b41b3cc96fae3703909abe8ea62cd"} Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.310646 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.330325 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" event={"ID":"fcca12ae-2952-47fb-b97c-6d913948ae44","Type":"ContainerDied","Data":"96ad6a7180c753a59eae8da09aedca3106dba94542c6e2e9aed6f9da910bc0c1"} Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.330376 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="96ad6a7180c753a59eae8da09aedca3106dba94542c6e2e9aed6f9da910bc0c1" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.330472 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt" Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.374202 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8qcxd" event={"ID":"2b42b8af-781f-4a40-bc43-658de04b12a6","Type":"ContainerStarted","Data":"50c1f304e1938140a3a9ee16f646e73a0735d19258981355c0a29b1240bab577"} Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.392093 4814 generic.go:334] "Generic (PLEG): container finished" podID="33942fc4-20af-4f7f-a3db-e04a2356e2db" containerID="ffb067413404ef5ded47f27a0b3e4ddcf3f6a4a79962043d063818db6b8a5f15" exitCode=0 Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.392168 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sl7ng" event={"ID":"33942fc4-20af-4f7f-a3db-e04a2356e2db","Type":"ContainerDied","Data":"ffb067413404ef5ded47f27a0b3e4ddcf3f6a4a79962043d063818db6b8a5f15"} Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.392194 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sl7ng" event={"ID":"33942fc4-20af-4f7f-a3db-e04a2356e2db","Type":"ContainerStarted","Data":"669736cf1229424b42a57a4d5f8ee45186a2df08fca736a8a373458b4c41c947"} Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.395403 4814 generic.go:334] "Generic (PLEG): container finished" podID="a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177" containerID="f0279cd427a2e17bd3a673578fe2318cde4364f6f1e50aba3355203d2772fb26" exitCode=0 Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.396074 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177","Type":"ContainerDied","Data":"f0279cd427a2e17bd3a673578fe2318cde4364f6f1e50aba3355203d2772fb26"} Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.616912 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t7mps"] Jan 22 05:21:05 crc kubenswrapper[4814]: I0122 05:21:05.658491 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7tr77"] Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.429408 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7tr77" event={"ID":"82fb31a6-1417-4ef9-a19b-1e877ce55477","Type":"ContainerStarted","Data":"7e21924bcd77ee29f5338c704c3020e49eb884fca6f602b4061cd23ff981462e"} Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.432867 4814 generic.go:334] "Generic (PLEG): container finished" podID="2b42b8af-781f-4a40-bc43-658de04b12a6" 
containerID="5da372f26f09bc598a177f78140d6205eb105f7c6e531641eafdb88cd4fa30fc" exitCode=0 Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.432913 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8qcxd" event={"ID":"2b42b8af-781f-4a40-bc43-658de04b12a6","Type":"ContainerDied","Data":"5da372f26f09bc598a177f78140d6205eb105f7c6e531641eafdb88cd4fa30fc"} Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.442502 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7mps" event={"ID":"24689da3-97aa-4d34-ad33-4fdb8950e6a9","Type":"ContainerStarted","Data":"178c764cc3e28a7997216fcdd21c06e8d01bc9255bc5596213a733db579e1edc"} Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.882069 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.932346 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.982727 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/75362bde-f94a-48d2-a36c-75c03967c08e-kube-api-access\") pod \"75362bde-f94a-48d2-a36c-75c03967c08e\" (UID: \"75362bde-f94a-48d2-a36c-75c03967c08e\") " Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.982811 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/75362bde-f94a-48d2-a36c-75c03967c08e-kubelet-dir\") pod \"75362bde-f94a-48d2-a36c-75c03967c08e\" (UID: \"75362bde-f94a-48d2-a36c-75c03967c08e\") " Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.982898 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177-kubelet-dir\") pod \"a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177\" (UID: \"a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177\") " Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.982934 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177-kube-api-access\") pod \"a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177\" (UID: \"a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177\") " Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.983282 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/75362bde-f94a-48d2-a36c-75c03967c08e-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "75362bde-f94a-48d2-a36c-75c03967c08e" (UID: "75362bde-f94a-48d2-a36c-75c03967c08e"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.983311 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177" (UID: "a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.993175 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177" (UID: "a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:21:06 crc kubenswrapper[4814]: I0122 05:21:06.995860 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75362bde-f94a-48d2-a36c-75c03967c08e-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "75362bde-f94a-48d2-a36c-75c03967c08e" (UID: "75362bde-f94a-48d2-a36c-75c03967c08e"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.088186 4814 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.088217 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.088227 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/75362bde-f94a-48d2-a36c-75c03967c08e-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.088236 4814 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/75362bde-f94a-48d2-a36c-75c03967c08e-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.457082 4814 generic.go:334] "Generic (PLEG): container finished" podID="24689da3-97aa-4d34-ad33-4fdb8950e6a9" containerID="075d4a694168d5c5df684332d65c779f62c421a45a1c76cc22266f2d9e9447f8" exitCode=0 Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.457165 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7mps" event={"ID":"24689da3-97aa-4d34-ad33-4fdb8950e6a9","Type":"ContainerDied","Data":"075d4a694168d5c5df684332d65c779f62c421a45a1c76cc22266f2d9e9447f8"} Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.467422 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177","Type":"ContainerDied","Data":"8d082554a43eb2951f9f0e20b15f3a7cb5698fafe2f192abb0425b3290181aba"} Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.467457 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d082554a43eb2951f9f0e20b15f3a7cb5698fafe2f192abb0425b3290181aba" Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.467538 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.473610 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"75362bde-f94a-48d2-a36c-75c03967c08e","Type":"ContainerDied","Data":"e1e034eb8d5b17872f7ade1df081858f6c60e218a41647ab79cfdc712b26c3e1"} Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.473658 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1e034eb8d5b17872f7ade1df081858f6c60e218a41647ab79cfdc712b26c3e1" Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.473738 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.485288 4814 generic.go:334] "Generic (PLEG): container finished" podID="82fb31a6-1417-4ef9-a19b-1e877ce55477" containerID="8ba959aa7953a1572712a56d67e4926464d159f0c200fd1378998f61d00581d7" exitCode=0 Jan 22 05:21:07 crc kubenswrapper[4814]: I0122 05:21:07.485327 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7tr77" event={"ID":"82fb31a6-1417-4ef9-a19b-1e877ce55477","Type":"ContainerDied","Data":"8ba959aa7953a1572712a56d67e4926464d159f0c200fd1378998f61d00581d7"} Jan 22 05:21:08 crc kubenswrapper[4814]: I0122 05:21:08.959691 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-g4spg" Jan 22 05:21:13 crc kubenswrapper[4814]: I0122 05:21:13.379065 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:21:13 crc kubenswrapper[4814]: I0122 05:21:13.389871 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-fsdht" Jan 22 05:21:13 crc kubenswrapper[4814]: I0122 05:21:13.390093 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:21:17 crc kubenswrapper[4814]: I0122 05:21:17.344447 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:21:17 crc kubenswrapper[4814]: I0122 05:21:17.363411 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33d4bb42-6c3b-4a42-bf7b-bb9a780f7873-metrics-certs\") pod \"network-metrics-daemon-nmwv2\" (UID: \"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873\") " pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:21:17 crc kubenswrapper[4814]: I0122 05:21:17.583164 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-nmwv2" Jan 22 05:21:19 crc kubenswrapper[4814]: I0122 05:21:19.614706 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:21:19 crc kubenswrapper[4814]: I0122 05:21:19.614749 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:21:23 crc kubenswrapper[4814]: I0122 05:21:23.085380 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:21:31 crc kubenswrapper[4814]: E0122 05:21:31.903687 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 22 05:21:31 crc kubenswrapper[4814]: E0122 05:21:31.904274 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t4xtm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-7tr77_openshift-marketplace(82fb31a6-1417-4ef9-a19b-1e877ce55477): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 05:21:31 crc kubenswrapper[4814]: E0122 05:21:31.905446 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" 
pod="openshift-marketplace/redhat-operators-7tr77" podUID="82fb31a6-1417-4ef9-a19b-1e877ce55477" Jan 22 05:21:33 crc kubenswrapper[4814]: I0122 05:21:33.295503 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-dh628" Jan 22 05:21:33 crc kubenswrapper[4814]: E0122 05:21:33.992607 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-7tr77" podUID="82fb31a6-1417-4ef9-a19b-1e877ce55477" Jan 22 05:21:34 crc kubenswrapper[4814]: E0122 05:21:34.111133 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 22 05:21:34 crc kubenswrapper[4814]: E0122 05:21:34.111289 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mkwss,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-5njqz_openshift-marketplace(46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 05:21:34 crc kubenswrapper[4814]: E0122 05:21:34.112574 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-5njqz" podUID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" Jan 22 05:21:34 crc kubenswrapper[4814]: E0122 05:21:34.131153 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context 
canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 22 05:21:34 crc kubenswrapper[4814]: E0122 05:21:34.131241 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-htkgt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-sgjff_openshift-marketplace(66f44c43-4c03-4af5-bba9-849b3d9b8724): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 05:21:34 crc kubenswrapper[4814]: E0122 05:21:34.132557 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-sgjff" podUID="66f44c43-4c03-4af5-bba9-849b3d9b8724" Jan 22 05:21:36 crc kubenswrapper[4814]: E0122 05:21:36.186019 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-sgjff" podUID="66f44c43-4c03-4af5-bba9-849b3d9b8724" Jan 22 05:21:36 crc kubenswrapper[4814]: E0122 05:21:36.186253 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-5njqz" podUID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" Jan 22 05:21:37 crc kubenswrapper[4814]: E0122 05:21:37.103115 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 22 05:21:37 crc kubenswrapper[4814]: E0122 05:21:37.103267 
4814 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v5jsl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-sl7ng_openshift-marketplace(33942fc4-20af-4f7f-a3db-e04a2356e2db): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 22 05:21:37 crc kubenswrapper[4814]: E0122 05:21:37.105078 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-sl7ng" podUID="33942fc4-20af-4f7f-a3db-e04a2356e2db" Jan 22 05:21:37 crc kubenswrapper[4814]: I0122 05:21:37.576173 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-nmwv2"] Jan 22 05:21:37 crc kubenswrapper[4814]: I0122 05:21:37.671981 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" event={"ID":"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873","Type":"ContainerStarted","Data":"99afbcaf62faae0b2197f63baac560e36304a280cb6a300a6859b7b7df26b0aa"} Jan 22 05:21:37 crc kubenswrapper[4814]: E0122 05:21:37.673376 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-sl7ng" podUID="33942fc4-20af-4f7f-a3db-e04a2356e2db" Jan 22 05:21:38 crc kubenswrapper[4814]: I0122 05:21:38.677428 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7mps" event={"ID":"24689da3-97aa-4d34-ad33-4fdb8950e6a9","Type":"ContainerStarted","Data":"0cdb1028fa8e50ce528b6883fe5ac80c36f8b0070acda290ffb68e4b6c3f80bb"} Jan 22 05:21:38 crc kubenswrapper[4814]: I0122 05:21:38.678826 4814 
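[Editor's note: the sequence above is the normal failure path: PullImage fails (ErrImagePull), and subsequent sync attempts are rejected with ImagePullBackOff until a backoff window expires. The sketch below illustrates the commonly cited kubelet defaults for this backoff (10s initial delay, doubling, capped at 5 minutes); those constants are an assumption, not something read from this log.]

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	const (
    		initialDelay = 10 * time.Second // assumed kubelet default
    		maxDelay     = 5 * time.Minute  // assumed cap
    	)
    	delay := initialDelay
    	for attempt := 1; attempt <= 7; attempt++ {
    		fmt.Printf("attempt %d: Back-off pulling image, next retry in %s\n", attempt, delay)
    		// Double the delay on each failed pull, up to the cap.
    		delay *= 2
    		if delay > maxDelay {
    			delay = maxDelay
    		}
    	}
    }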
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" event={"ID":"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873","Type":"ContainerStarted","Data":"607ddc9f476b3fe66c2ceb37859098e27fd7dd1b946f997cc0365955de93b4f9"} Jan 22 05:21:38 crc kubenswrapper[4814]: I0122 05:21:38.681023 4814 generic.go:334] "Generic (PLEG): container finished" podID="8d371e5b-a490-441d-90c3-ead8479f81dc" containerID="96b0259b9df6d429fe09a94006389caa8efb2566adc79cb3c700eae719d6514d" exitCode=0 Jan 22 05:21:38 crc kubenswrapper[4814]: I0122 05:21:38.681081 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwp7v" event={"ID":"8d371e5b-a490-441d-90c3-ead8479f81dc","Type":"ContainerDied","Data":"96b0259b9df6d429fe09a94006389caa8efb2566adc79cb3c700eae719d6514d"} Jan 22 05:21:38 crc kubenswrapper[4814]: I0122 05:21:38.682794 4814 generic.go:334] "Generic (PLEG): container finished" podID="dfc03373-04f8-49da-a3d6-5428a0324db5" containerID="99745aae137d2582e9bc2dbab8d4b8b60818ff6a2cd2e8118419c2d8a29c8d7e" exitCode=0 Jan 22 05:21:38 crc kubenswrapper[4814]: I0122 05:21:38.683216 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b84wr" event={"ID":"dfc03373-04f8-49da-a3d6-5428a0324db5","Type":"ContainerDied","Data":"99745aae137d2582e9bc2dbab8d4b8b60818ff6a2cd2e8118419c2d8a29c8d7e"} Jan 22 05:21:39 crc kubenswrapper[4814]: I0122 05:21:39.688997 4814 generic.go:334] "Generic (PLEG): container finished" podID="24689da3-97aa-4d34-ad33-4fdb8950e6a9" containerID="0cdb1028fa8e50ce528b6883fe5ac80c36f8b0070acda290ffb68e4b6c3f80bb" exitCode=0 Jan 22 05:21:39 crc kubenswrapper[4814]: I0122 05:21:39.689292 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7mps" event={"ID":"24689da3-97aa-4d34-ad33-4fdb8950e6a9","Type":"ContainerDied","Data":"0cdb1028fa8e50ce528b6883fe5ac80c36f8b0070acda290ffb68e4b6c3f80bb"} Jan 22 05:21:39 crc kubenswrapper[4814]: I0122 05:21:39.691297 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-nmwv2" event={"ID":"33d4bb42-6c3b-4a42-bf7b-bb9a780f7873","Type":"ContainerStarted","Data":"262b5b258cfb7acfabb8ef5d46bd8a7fe254b91cc9644326ab6cb7bc4cf60f3d"} Jan 22 05:21:39 crc kubenswrapper[4814]: I0122 05:21:39.722337 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-nmwv2" podStartSLOduration=166.72232282 podStartE2EDuration="2m46.72232282s" podCreationTimestamp="2026-01-22 05:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:21:39.719158723 +0000 UTC m=+185.802646938" watchObservedRunningTime="2026-01-22 05:21:39.72232282 +0000 UTC m=+185.805811035" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.593488 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.702960 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwp7v" event={"ID":"8d371e5b-a490-441d-90c3-ead8479f81dc","Type":"ContainerStarted","Data":"31ecc52f4d00b83a6d5e44635b63c3a06b8eb8409566e4aadc436b78bc8f1263"} Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.705778 4814 generic.go:334] "Generic (PLEG): container finished" 
podID="2b42b8af-781f-4a40-bc43-658de04b12a6" containerID="48d3254fe9e4bec02fa66333be3f45b3607021c25df41a417095ff1ecdabb859" exitCode=0 Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.705840 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8qcxd" event={"ID":"2b42b8af-781f-4a40-bc43-658de04b12a6","Type":"ContainerDied","Data":"48d3254fe9e4bec02fa66333be3f45b3607021c25df41a417095ff1ecdabb859"} Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.708845 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b84wr" event={"ID":"dfc03373-04f8-49da-a3d6-5428a0324db5","Type":"ContainerStarted","Data":"957f349cf8e513db5222805327216b5b03e39c7313bf995666e38652f3f6df39"} Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.715478 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7mps" event={"ID":"24689da3-97aa-4d34-ad33-4fdb8950e6a9","Type":"ContainerStarted","Data":"b6476662b42d2aa1deaac1d3981a34abb178d5a506741daf5b2f2765ff8e5081"} Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.730908 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vwp7v" podStartSLOduration=2.7777614980000003 podStartE2EDuration="40.730893718s" podCreationTimestamp="2026-01-22 05:21:01 +0000 UTC" firstStartedPulling="2026-01-22 05:21:03.197219824 +0000 UTC m=+149.280708039" lastFinishedPulling="2026-01-22 05:21:41.150352044 +0000 UTC m=+187.233840259" observedRunningTime="2026-01-22 05:21:41.72797666 +0000 UTC m=+187.811464875" watchObservedRunningTime="2026-01-22 05:21:41.730893718 +0000 UTC m=+187.814381933" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.756402 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-t7mps" podStartSLOduration=3.695176591 podStartE2EDuration="37.756389409s" podCreationTimestamp="2026-01-22 05:21:04 +0000 UTC" firstStartedPulling="2026-01-22 05:21:07.46187326 +0000 UTC m=+153.545361475" lastFinishedPulling="2026-01-22 05:21:41.523086078 +0000 UTC m=+187.606574293" observedRunningTime="2026-01-22 05:21:41.754192574 +0000 UTC m=+187.837680789" watchObservedRunningTime="2026-01-22 05:21:41.756389409 +0000 UTC m=+187.839877624" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.774853 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b84wr" podStartSLOduration=2.816995901 podStartE2EDuration="40.774840211s" podCreationTimestamp="2026-01-22 05:21:01 +0000 UTC" firstStartedPulling="2026-01-22 05:21:03.196645814 +0000 UTC m=+149.280134029" lastFinishedPulling="2026-01-22 05:21:41.154490124 +0000 UTC m=+187.237978339" observedRunningTime="2026-01-22 05:21:41.774610613 +0000 UTC m=+187.858098828" watchObservedRunningTime="2026-01-22 05:21:41.774840211 +0000 UTC m=+187.858328426" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.824446 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-mhktc"] Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.841239 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b84wr" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.841363 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/community-operators-b84wr" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.955610 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 22 05:21:41 crc kubenswrapper[4814]: E0122 05:21:41.955812 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177" containerName="pruner" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.955823 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177" containerName="pruner" Jan 22 05:21:41 crc kubenswrapper[4814]: E0122 05:21:41.955837 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75362bde-f94a-48d2-a36c-75c03967c08e" containerName="pruner" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.955842 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="75362bde-f94a-48d2-a36c-75c03967c08e" containerName="pruner" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.955938 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7f9a11d-e7dd-40d4-9cf2-c7b04e21a177" containerName="pruner" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.955949 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="75362bde-f94a-48d2-a36c-75c03967c08e" containerName="pruner" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.956262 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.959159 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.959707 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 22 05:21:41 crc kubenswrapper[4814]: I0122 05:21:41.973599 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 22 05:21:42 crc kubenswrapper[4814]: I0122 05:21:42.053484 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:21:42 crc kubenswrapper[4814]: I0122 05:21:42.053553 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:21:42 crc kubenswrapper[4814]: I0122 05:21:42.154690 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:21:42 crc kubenswrapper[4814]: I0122 05:21:42.154757 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:21:42 crc kubenswrapper[4814]: I0122 05:21:42.154848 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:21:42 crc kubenswrapper[4814]: I0122 05:21:42.184353 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:21:42 crc kubenswrapper[4814]: I0122 05:21:42.268287 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:21:42 crc kubenswrapper[4814]: I0122 05:21:42.721738 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8qcxd" event={"ID":"2b42b8af-781f-4a40-bc43-658de04b12a6","Type":"ContainerStarted","Data":"f3d6ce757b0c82edcc6851b226e3c32bf1910160f81aaeb7c3689b24b4dac4fa"} Jan 22 05:21:42 crc kubenswrapper[4814]: I0122 05:21:42.739162 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8qcxd" podStartSLOduration=3.003326931 podStartE2EDuration="39.739147376s" podCreationTimestamp="2026-01-22 05:21:03 +0000 UTC" firstStartedPulling="2026-01-22 05:21:05.377312738 +0000 UTC m=+151.460800953" lastFinishedPulling="2026-01-22 05:21:42.113133183 +0000 UTC m=+188.196621398" observedRunningTime="2026-01-22 05:21:42.737168544 +0000 UTC m=+188.820656749" watchObservedRunningTime="2026-01-22 05:21:42.739147376 +0000 UTC m=+188.822635591" Jan 22 05:21:42 crc kubenswrapper[4814]: I0122 05:21:42.814283 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 22 05:21:42 crc kubenswrapper[4814]: I0122 05:21:42.992461 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-b84wr" podUID="dfc03373-04f8-49da-a3d6-5428a0324db5" containerName="registry-server" probeResult="failure" output=< Jan 22 05:21:42 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 05:21:42 crc kubenswrapper[4814]: > Jan 22 05:21:43 crc kubenswrapper[4814]: I0122 05:21:43.728635 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de","Type":"ContainerStarted","Data":"2a4ae7ccf27c8007d6168bd75ee0fc08806d6256f6eb0dab1e7beb84bb2094b7"} Jan 22 05:21:43 crc kubenswrapper[4814]: I0122 05:21:43.728886 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de","Type":"ContainerStarted","Data":"a11073b0f381ab6494cfc129edda0701e90f77431791356bfa503c974bcf23fb"} Jan 22 05:21:44 crc kubenswrapper[4814]: I0122 05:21:44.296902 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 
22 05:21:44 crc kubenswrapper[4814]: I0122 05:21:44.297016 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:21:44 crc kubenswrapper[4814]: I0122 05:21:44.340369 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:21:44 crc kubenswrapper[4814]: I0122 05:21:44.362122 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=3.362102191 podStartE2EDuration="3.362102191s" podCreationTimestamp="2026-01-22 05:21:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:21:43.752803548 +0000 UTC m=+189.836291763" watchObservedRunningTime="2026-01-22 05:21:44.362102191 +0000 UTC m=+190.445590416" Jan 22 05:21:44 crc kubenswrapper[4814]: I0122 05:21:44.735642 4814 generic.go:334] "Generic (PLEG): container finished" podID="ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de" containerID="2a4ae7ccf27c8007d6168bd75ee0fc08806d6256f6eb0dab1e7beb84bb2094b7" exitCode=0 Jan 22 05:21:44 crc kubenswrapper[4814]: I0122 05:21:44.735726 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de","Type":"ContainerDied","Data":"2a4ae7ccf27c8007d6168bd75ee0fc08806d6256f6eb0dab1e7beb84bb2094b7"} Jan 22 05:21:44 crc kubenswrapper[4814]: I0122 05:21:44.863237 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:21:44 crc kubenswrapper[4814]: I0122 05:21:44.863299 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:21:45 crc kubenswrapper[4814]: I0122 05:21:45.914373 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-t7mps" podUID="24689da3-97aa-4d34-ad33-4fdb8950e6a9" containerName="registry-server" probeResult="failure" output=< Jan 22 05:21:45 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 05:21:45 crc kubenswrapper[4814]: > Jan 22 05:21:45 crc kubenswrapper[4814]: I0122 05:21:45.975634 4814 util.go:48] "No ready sandbox for pod can be found. 
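[Editor's note: the probe output above, "timeout: failed to connect service \":50051\" within 1s", is the registry-server startup probe: a gRPC health check against the catalog pod's port 50051. Below is a minimal sketch of such a check using the standard grpc.health.v1 protocol; the exact probe binary is not shown in this log, and the sketch assumes the google.golang.org/grpc module is available.]

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    	healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    func main() {
    	// The probe budget seen in the log output is one second.
    	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
    	defer cancel()

    	conn, err := grpc.DialContext(ctx, "localhost:50051",
    		grpc.WithTransportCredentials(insecure.NewCredentials()),
    		grpc.WithBlock()) // block so a dead endpoint fails within the budget
    	if err != nil {
    		fmt.Println(`timeout: failed to connect service ":50051" within 1s`)
    		return
    	}
    	defer conn.Close()

    	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
    	if err != nil || resp.GetStatus() != healthpb.HealthCheckResponse_SERVING {
    		fmt.Println("service not SERVING:", err)
    		return
    	}
    	fmt.Println("status: SERVING")
    }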
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:21:46 crc kubenswrapper[4814]: I0122 05:21:46.100493 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de-kubelet-dir\") pod \"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de\" (UID: \"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de\") " Jan 22 05:21:46 crc kubenswrapper[4814]: I0122 05:21:46.100583 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de-kube-api-access\") pod \"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de\" (UID: \"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de\") " Jan 22 05:21:46 crc kubenswrapper[4814]: I0122 05:21:46.101271 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de" (UID: "ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:21:46 crc kubenswrapper[4814]: I0122 05:21:46.105007 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de" (UID: "ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:21:46 crc kubenswrapper[4814]: I0122 05:21:46.202595 4814 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:21:46 crc kubenswrapper[4814]: I0122 05:21:46.203046 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 22 05:21:46 crc kubenswrapper[4814]: I0122 05:21:46.746736 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de","Type":"ContainerDied","Data":"a11073b0f381ab6494cfc129edda0701e90f77431791356bfa503c974bcf23fb"} Jan 22 05:21:46 crc kubenswrapper[4814]: I0122 05:21:46.747000 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a11073b0f381ab6494cfc129edda0701e90f77431791356bfa503c974bcf23fb" Jan 22 05:21:46 crc kubenswrapper[4814]: I0122 05:21:46.746806 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 22 05:21:48 crc kubenswrapper[4814]: I0122 05:21:48.756208 4814 generic.go:334] "Generic (PLEG): container finished" podID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" containerID="d38a263850f03e353005101c36cb66b945f53677072c3d01516fd301e4c1d376" exitCode=0 Jan 22 05:21:48 crc kubenswrapper[4814]: I0122 05:21:48.756499 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5njqz" event={"ID":"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f","Type":"ContainerDied","Data":"d38a263850f03e353005101c36cb66b945f53677072c3d01516fd301e4c1d376"} Jan 22 05:21:48 crc kubenswrapper[4814]: I0122 05:21:48.759479 4814 generic.go:334] "Generic (PLEG): container finished" podID="66f44c43-4c03-4af5-bba9-849b3d9b8724" containerID="5c9efbb16297abe6b15073944c9f4f08813207274d504bb7c57b4c6ad5a8b3cf" exitCode=0 Jan 22 05:21:48 crc kubenswrapper[4814]: I0122 05:21:48.759503 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sgjff" event={"ID":"66f44c43-4c03-4af5-bba9-849b3d9b8724","Type":"ContainerDied","Data":"5c9efbb16297abe6b15073944c9f4f08813207274d504bb7c57b4c6ad5a8b3cf"} Jan 22 05:21:48 crc kubenswrapper[4814]: I0122 05:21:48.912848 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 22 05:21:48 crc kubenswrapper[4814]: E0122 05:21:48.913237 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de" containerName="pruner" Jan 22 05:21:48 crc kubenswrapper[4814]: I0122 05:21:48.913265 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de" containerName="pruner" Jan 22 05:21:48 crc kubenswrapper[4814]: I0122 05:21:48.913452 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecb0bf26-3ccf-4ccf-b88a-ce8a8afea4de" containerName="pruner" Jan 22 05:21:48 crc kubenswrapper[4814]: I0122 05:21:48.914231 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:21:48 crc kubenswrapper[4814]: I0122 05:21:48.917065 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 22 05:21:48 crc kubenswrapper[4814]: I0122 05:21:48.924141 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 22 05:21:48 crc kubenswrapper[4814]: I0122 05:21:48.926169 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.048883 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-var-lock\") pod \"installer-9-crc\" (UID: \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.049361 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.049448 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-kube-api-access\") pod \"installer-9-crc\" (UID: \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.150684 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-var-lock\") pod \"installer-9-crc\" (UID: \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.151047 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.151262 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-kube-api-access\") pod \"installer-9-crc\" (UID: \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.151116 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-kubelet-dir\") pod \"installer-9-crc\" (UID: \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.150823 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-var-lock\") pod \"installer-9-crc\" (UID: 
\"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.174143 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-kube-api-access\") pod \"installer-9-crc\" (UID: \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.247693 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.528522 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.617311 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.617365 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.769967 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71","Type":"ContainerStarted","Data":"3f6e13ff835e0dbaef126cd34235bbe5bb55b2a4ecfbf7b396b1e6954ab7e240"} Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.772275 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5njqz" event={"ID":"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f","Type":"ContainerStarted","Data":"9bf4a5bd1ea5f320ebd2c4e95dc57f37445039cbba94198a694c246b46ba3ff4"} Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.779439 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sgjff" event={"ID":"66f44c43-4c03-4af5-bba9-849b3d9b8724","Type":"ContainerStarted","Data":"a97434e8625ec948c89558aa77ddcdab0e5522b78f456cd0b70e508e42562d1c"} Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.781057 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7tr77" event={"ID":"82fb31a6-1417-4ef9-a19b-1e877ce55477","Type":"ContainerStarted","Data":"8998bf46da657c00f675f7b80881129e9cb7f4c1c40c0a24e69bec64e0ca5e4a"} Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.791021 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5njqz" podStartSLOduration=2.708382204 podStartE2EDuration="48.791003636s" podCreationTimestamp="2026-01-22 05:21:01 +0000 UTC" firstStartedPulling="2026-01-22 05:21:03.197522894 +0000 UTC m=+149.281011109" lastFinishedPulling="2026-01-22 05:21:49.280144286 +0000 UTC m=+195.363632541" observedRunningTime="2026-01-22 05:21:49.789276903 +0000 UTC m=+195.872765118" watchObservedRunningTime="2026-01-22 05:21:49.791003636 +0000 UTC m=+195.874491851" Jan 22 05:21:49 crc kubenswrapper[4814]: I0122 05:21:49.808110 4814 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sgjff" podStartSLOduration=3.880020231 podStartE2EDuration="48.808092655s" podCreationTimestamp="2026-01-22 05:21:01 +0000 UTC" firstStartedPulling="2026-01-22 05:21:04.235404646 +0000 UTC m=+150.318892861" lastFinishedPulling="2026-01-22 05:21:49.16347703 +0000 UTC m=+195.246965285" observedRunningTime="2026-01-22 05:21:49.807328021 +0000 UTC m=+195.890816236" watchObservedRunningTime="2026-01-22 05:21:49.808092655 +0000 UTC m=+195.891580870" Jan 22 05:21:50 crc kubenswrapper[4814]: I0122 05:21:50.787929 4814 generic.go:334] "Generic (PLEG): container finished" podID="82fb31a6-1417-4ef9-a19b-1e877ce55477" containerID="8998bf46da657c00f675f7b80881129e9cb7f4c1c40c0a24e69bec64e0ca5e4a" exitCode=0 Jan 22 05:21:50 crc kubenswrapper[4814]: I0122 05:21:50.788019 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7tr77" event={"ID":"82fb31a6-1417-4ef9-a19b-1e877ce55477","Type":"ContainerDied","Data":"8998bf46da657c00f675f7b80881129e9cb7f4c1c40c0a24e69bec64e0ca5e4a"} Jan 22 05:21:50 crc kubenswrapper[4814]: I0122 05:21:50.792082 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71","Type":"ContainerStarted","Data":"ff80441962ec0b35ad0e78a93adb8db482d2846894b4bbfa997d0d71e4365b50"} Jan 22 05:21:50 crc kubenswrapper[4814]: I0122 05:21:50.797777 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sl7ng" event={"ID":"33942fc4-20af-4f7f-a3db-e04a2356e2db","Type":"ContainerStarted","Data":"49ae8b672ce05325c7a0eb6865543d187972b0092794b0c82234f91a71bb2595"} Jan 22 05:21:50 crc kubenswrapper[4814]: I0122 05:21:50.831992 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.8319630719999997 podStartE2EDuration="2.831963072s" podCreationTimestamp="2026-01-22 05:21:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:21:50.827433222 +0000 UTC m=+196.910921437" watchObservedRunningTime="2026-01-22 05:21:50.831963072 +0000 UTC m=+196.915451297" Jan 22 05:21:51 crc kubenswrapper[4814]: I0122 05:21:51.669888 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vwp7v" Jan 22 05:21:51 crc kubenswrapper[4814]: I0122 05:21:51.669958 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vwp7v" Jan 22 05:21:51 crc kubenswrapper[4814]: I0122 05:21:51.745334 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vwp7v" Jan 22 05:21:51 crc kubenswrapper[4814]: I0122 05:21:51.819364 4814 generic.go:334] "Generic (PLEG): container finished" podID="33942fc4-20af-4f7f-a3db-e04a2356e2db" containerID="49ae8b672ce05325c7a0eb6865543d187972b0092794b0c82234f91a71bb2595" exitCode=0 Jan 22 05:21:51 crc kubenswrapper[4814]: I0122 05:21:51.819495 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sl7ng" event={"ID":"33942fc4-20af-4f7f-a3db-e04a2356e2db","Type":"ContainerDied","Data":"49ae8b672ce05325c7a0eb6865543d187972b0092794b0c82234f91a71bb2595"} Jan 22 05:21:51 crc kubenswrapper[4814]: I0122 
Jan 22 05:21:51 crc kubenswrapper[4814]: I0122 05:21:51.883604 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vwp7v"
Jan 22 05:21:51 crc kubenswrapper[4814]: I0122 05:21:51.910700 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b84wr"
Jan 22 05:21:51 crc kubenswrapper[4814]: I0122 05:21:51.962280 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b84wr"
Jan 22 05:21:52 crc kubenswrapper[4814]: I0122 05:21:52.108027 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:52 crc kubenswrapper[4814]: I0122 05:21:52.109703 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:52 crc kubenswrapper[4814]: I0122 05:21:52.155833 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5njqz"
Jan 22 05:21:52 crc kubenswrapper[4814]: I0122 05:21:52.265684 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:52 crc kubenswrapper[4814]: I0122 05:21:52.266077 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:52 crc kubenswrapper[4814]: I0122 05:21:52.304194 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-sgjff"
Jan 22 05:21:52 crc kubenswrapper[4814]: I0122 05:21:52.827691 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7tr77" event={"ID":"82fb31a6-1417-4ef9-a19b-1e877ce55477","Type":"ContainerStarted","Data":"7a09c7c3130299285d9ad1f601102927f1ef4c3a2dc50b72c06958dc1952c1cd"}
Jan 22 05:21:52 crc kubenswrapper[4814]: I0122 05:21:52.830590 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sl7ng" event={"ID":"33942fc4-20af-4f7f-a3db-e04a2356e2db","Type":"ContainerStarted","Data":"b486b4e9774bc52c306433ea1a17fb7da03ad21bc3baa9d9d8722b43d6c77cda"}
Jan 22 05:21:52 crc kubenswrapper[4814]: I0122 05:21:52.850580 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7tr77" podStartSLOduration=4.336194541 podStartE2EDuration="48.850558593s" podCreationTimestamp="2026-01-22 05:21:04 +0000 UTC" firstStartedPulling="2026-01-22 05:21:07.486941506 +0000 UTC m=+153.570429721" lastFinishedPulling="2026-01-22 05:21:52.001305528 +0000 UTC m=+198.084793773" observedRunningTime="2026-01-22 05:21:52.849487229 +0000 UTC m=+198.932975444" watchObservedRunningTime="2026-01-22 05:21:52.850558593 +0000 UTC m=+198.934046848"
Jan 22 05:21:52 crc kubenswrapper[4814]: I0122 05:21:52.871256 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sl7ng" podStartSLOduration=3.191972845 podStartE2EDuration="49.871230587s" podCreationTimestamp="2026-01-22 05:21:03 +0000 UTC" firstStartedPulling="2026-01-22 05:21:05.39369407 +0000 UTC m=+151.477182285" lastFinishedPulling="2026-01-22 05:21:52.072951802 +0000 UTC m=+198.156440027" observedRunningTime="2026-01-22 05:21:52.865166808 +0000 UTC m=+198.948655033" watchObservedRunningTime="2026-01-22 05:21:52.871230587 +0000 UTC m=+198.954718842"
Jan 22 05:21:53 crc kubenswrapper[4814]: I0122 05:21:53.848807 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sl7ng"
Jan 22 05:21:53 crc kubenswrapper[4814]: I0122 05:21:53.848851 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sl7ng"
Jan 22 05:21:54 crc kubenswrapper[4814]: I0122 05:21:54.354332 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8qcxd"
Jan 22 05:21:54 crc kubenswrapper[4814]: I0122 05:21:54.895528 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-sl7ng" podUID="33942fc4-20af-4f7f-a3db-e04a2356e2db" containerName="registry-server" probeResult="failure" output=<
Jan 22 05:21:54 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s
Jan 22 05:21:54 crc kubenswrapper[4814]: >
Jan 22 05:21:54 crc kubenswrapper[4814]: I0122 05:21:54.916748 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-t7mps"
Jan 22 05:21:54 crc kubenswrapper[4814]: I0122 05:21:54.968512 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-t7mps"
Jan 22 05:21:55 crc kubenswrapper[4814]: I0122 05:21:55.311503 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7tr77"
Jan 22 05:21:55 crc kubenswrapper[4814]: I0122 05:21:55.311866 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7tr77"
Jan 22 05:21:56 crc kubenswrapper[4814]: I0122 05:21:56.360822 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7tr77" podUID="82fb31a6-1417-4ef9-a19b-1e877ce55477" containerName="registry-server" probeResult="failure" output=<
Jan 22 05:21:56 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s
Jan 22 05:21:56 crc kubenswrapper[4814]: >
Jan 22 05:21:57 crc kubenswrapper[4814]: I0122 05:21:57.339702 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8qcxd"]
Jan 22 05:21:57 crc kubenswrapper[4814]: I0122 05:21:57.340306 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8qcxd" podUID="2b42b8af-781f-4a40-bc43-658de04b12a6" containerName="registry-server" containerID="cri-o://f3d6ce757b0c82edcc6851b226e3c32bf1910160f81aaeb7c3689b24b4dac4fa" gracePeriod=2
Jan 22 05:21:58 crc kubenswrapper[4814]: I0122 05:21:58.873444 4814 generic.go:334] "Generic (PLEG): container finished" podID="2b42b8af-781f-4a40-bc43-658de04b12a6" containerID="f3d6ce757b0c82edcc6851b226e3c32bf1910160f81aaeb7c3689b24b4dac4fa" exitCode=0
Jan 22 05:21:58 crc kubenswrapper[4814]: I0122 05:21:58.873505 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8qcxd" event={"ID":"2b42b8af-781f-4a40-bc43-658de04b12a6","Type":"ContainerDied","Data":"f3d6ce757b0c82edcc6851b226e3c32bf1910160f81aaeb7c3689b24b4dac4fa"}
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.318536 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b42b8af-781f-4a40-bc43-658de04b12a6-utilities\") pod \"2b42b8af-781f-4a40-bc43-658de04b12a6\" (UID: \"2b42b8af-781f-4a40-bc43-658de04b12a6\") " Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.318731 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b42b8af-781f-4a40-bc43-658de04b12a6-catalog-content\") pod \"2b42b8af-781f-4a40-bc43-658de04b12a6\" (UID: \"2b42b8af-781f-4a40-bc43-658de04b12a6\") " Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.318879 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5pgm6\" (UniqueName: \"kubernetes.io/projected/2b42b8af-781f-4a40-bc43-658de04b12a6-kube-api-access-5pgm6\") pod \"2b42b8af-781f-4a40-bc43-658de04b12a6\" (UID: \"2b42b8af-781f-4a40-bc43-658de04b12a6\") " Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.319874 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b42b8af-781f-4a40-bc43-658de04b12a6-utilities" (OuterVolumeSpecName: "utilities") pod "2b42b8af-781f-4a40-bc43-658de04b12a6" (UID: "2b42b8af-781f-4a40-bc43-658de04b12a6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.334789 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b42b8af-781f-4a40-bc43-658de04b12a6-kube-api-access-5pgm6" (OuterVolumeSpecName: "kube-api-access-5pgm6") pod "2b42b8af-781f-4a40-bc43-658de04b12a6" (UID: "2b42b8af-781f-4a40-bc43-658de04b12a6"). InnerVolumeSpecName "kube-api-access-5pgm6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.342410 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b42b8af-781f-4a40-bc43-658de04b12a6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2b42b8af-781f-4a40-bc43-658de04b12a6" (UID: "2b42b8af-781f-4a40-bc43-658de04b12a6"). InnerVolumeSpecName "catalog-content". 
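
The unmount sequence above always arrives in the same three beats per volume: reconciler_common starts an UnmountVolume operation, operation_generator reports TearDown succeeded (for emptyDir volumes this amounts to removing the backing directory), and reconciler_common finally records "Volume detached". Underneath is a desired-state versus actual-state reconciliation: once the pod is deleted its volumes vanish from the desired state, and everything still mounted gets torn down. A deliberately simplified sketch of that pattern, not kubelet's actual code; all names are illustrative:

    package main

    import "fmt"

    type volume struct{ name, plugin string }

    // reconcile tears down every volume that is still mounted (actual state)
    // but no longer wanted by any pod (desired state), mirroring the
    // UnmountVolume -> TearDown -> "Volume detached" progression in the log.
    func reconcile(desired, actual map[string]volume) {
        for name, v := range actual {
            if _, stillWanted := desired[name]; stillWanted {
                continue
            }
            fmt.Printf("UnmountVolume started for volume %q\n", name)
            fmt.Printf("TearDown succeeded for volume %q (plugin %s)\n", name, v.plugin)
            delete(actual, name)
            fmt.Printf("Volume detached for volume %q\n", name)
        }
    }

    func main() {
        actual := map[string]volume{
            "catalog-content": {"catalog-content", "kubernetes.io/empty-dir"},
            "utilities":       {"utilities", "kubernetes.io/empty-dir"},
        }
        reconcile(map[string]volume{}, actual) // pod deleted: desired state is empty
    }
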
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.420553 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b42b8af-781f-4a40-bc43-658de04b12a6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.420592 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5pgm6\" (UniqueName: \"kubernetes.io/projected/2b42b8af-781f-4a40-bc43-658de04b12a6-kube-api-access-5pgm6\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.420604 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b42b8af-781f-4a40-bc43-658de04b12a6-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.891907 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8qcxd" event={"ID":"2b42b8af-781f-4a40-bc43-658de04b12a6","Type":"ContainerDied","Data":"50c1f304e1938140a3a9ee16f646e73a0735d19258981355c0a29b1240bab577"} Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.891987 4814 scope.go:117] "RemoveContainer" containerID="f3d6ce757b0c82edcc6851b226e3c32bf1910160f81aaeb7c3689b24b4dac4fa" Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.892183 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8qcxd" Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.920015 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8qcxd"] Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.925988 4814 scope.go:117] "RemoveContainer" containerID="48d3254fe9e4bec02fa66333be3f45b3607021c25df41a417095ff1ecdabb859" Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.938677 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8qcxd"] Jan 22 05:22:00 crc kubenswrapper[4814]: I0122 05:22:00.949298 4814 scope.go:117] "RemoveContainer" containerID="5da372f26f09bc598a177f78140d6205eb105f7c6e531641eafdb88cd4fa30fc" Jan 22 05:22:02 crc kubenswrapper[4814]: I0122 05:22:02.180360 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5njqz" Jan 22 05:22:02 crc kubenswrapper[4814]: I0122 05:22:02.349176 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b42b8af-781f-4a40-bc43-658de04b12a6" path="/var/lib/kubelet/pods/2b42b8af-781f-4a40-bc43-658de04b12a6/volumes" Jan 22 05:22:02 crc kubenswrapper[4814]: I0122 05:22:02.390483 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-sgjff" Jan 22 05:22:03 crc kubenswrapper[4814]: I0122 05:22:03.924228 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:22:03 crc kubenswrapper[4814]: I0122 05:22:03.997544 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:22:04 crc kubenswrapper[4814]: I0122 05:22:04.139998 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5njqz"] Jan 22 05:22:04 crc kubenswrapper[4814]: I0122 05:22:04.140589 4814 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openshift-marketplace/certified-operators-5njqz" podUID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" containerName="registry-server" containerID="cri-o://9bf4a5bd1ea5f320ebd2c4e95dc57f37445039cbba94198a694c246b46ba3ff4" gracePeriod=2 Jan 22 05:22:04 crc kubenswrapper[4814]: I0122 05:22:04.744328 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sgjff"] Jan 22 05:22:04 crc kubenswrapper[4814]: I0122 05:22:04.744714 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-sgjff" podUID="66f44c43-4c03-4af5-bba9-849b3d9b8724" containerName="registry-server" containerID="cri-o://a97434e8625ec948c89558aa77ddcdab0e5522b78f456cd0b70e508e42562d1c" gracePeriod=2 Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.388490 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.488551 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.723859 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5njqz" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.729687 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sgjff" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.799521 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkwss\" (UniqueName: \"kubernetes.io/projected/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-kube-api-access-mkwss\") pod \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\" (UID: \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\") " Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.799590 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-catalog-content\") pod \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\" (UID: \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\") " Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.799697 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-utilities\") pod \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\" (UID: \"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f\") " Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.800583 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-utilities" (OuterVolumeSpecName: "utilities") pod "46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" (UID: "46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.807988 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-kube-api-access-mkwss" (OuterVolumeSpecName: "kube-api-access-mkwss") pod "46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" (UID: "46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f"). InnerVolumeSpecName "kube-api-access-mkwss". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.843753 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" (UID: "46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.900343 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66f44c43-4c03-4af5-bba9-849b3d9b8724-utilities\") pod \"66f44c43-4c03-4af5-bba9-849b3d9b8724\" (UID: \"66f44c43-4c03-4af5-bba9-849b3d9b8724\") " Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.900434 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66f44c43-4c03-4af5-bba9-849b3d9b8724-catalog-content\") pod \"66f44c43-4c03-4af5-bba9-849b3d9b8724\" (UID: \"66f44c43-4c03-4af5-bba9-849b3d9b8724\") " Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.900455 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htkgt\" (UniqueName: \"kubernetes.io/projected/66f44c43-4c03-4af5-bba9-849b3d9b8724-kube-api-access-htkgt\") pod \"66f44c43-4c03-4af5-bba9-849b3d9b8724\" (UID: \"66f44c43-4c03-4af5-bba9-849b3d9b8724\") " Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.900685 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.900704 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.900745 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkwss\" (UniqueName: \"kubernetes.io/projected/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f-kube-api-access-mkwss\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.903728 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66f44c43-4c03-4af5-bba9-849b3d9b8724-utilities" (OuterVolumeSpecName: "utilities") pod "66f44c43-4c03-4af5-bba9-849b3d9b8724" (UID: "66f44c43-4c03-4af5-bba9-849b3d9b8724"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.904536 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66f44c43-4c03-4af5-bba9-849b3d9b8724-kube-api-access-htkgt" (OuterVolumeSpecName: "kube-api-access-htkgt") pod "66f44c43-4c03-4af5-bba9-849b3d9b8724" (UID: "66f44c43-4c03-4af5-bba9-849b3d9b8724"). InnerVolumeSpecName "kube-api-access-htkgt". 
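
The "SyncLoop DELETE" followed by "Killing container with a grace period ... gracePeriod=2" pairs above show the API-side deletion reaching kubelet: the grace period carried on the delete request bounds how long the registry-server process gets between SIGTERM and SIGKILL. A deletion of that shape can be issued with client-go as below; the two-second grace value simply mirrors the log, and the target names are copied from it for illustration:

    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        cs := kubernetes.NewForConfigOrDie(cfg)

        grace := int64(2) // mirrors gracePeriod=2 in the log
        err = cs.CoreV1().Pods("openshift-marketplace").Delete(
            context.TODO(),
            "certified-operators-5njqz",
            metav1.DeleteOptions{GracePeriodSeconds: &grace},
        )
        if err != nil {
            panic(err)
        }
    }
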
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.936673 4814 generic.go:334] "Generic (PLEG): container finished" podID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" containerID="9bf4a5bd1ea5f320ebd2c4e95dc57f37445039cbba94198a694c246b46ba3ff4" exitCode=0 Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.936795 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5njqz" event={"ID":"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f","Type":"ContainerDied","Data":"9bf4a5bd1ea5f320ebd2c4e95dc57f37445039cbba94198a694c246b46ba3ff4"} Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.936872 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5njqz" event={"ID":"46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f","Type":"ContainerDied","Data":"efb50102ef7a6a0b1bacb1b63adf04e254c0f98ee588f7f1c033a06b04bf6705"} Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.936891 4814 scope.go:117] "RemoveContainer" containerID="9bf4a5bd1ea5f320ebd2c4e95dc57f37445039cbba94198a694c246b46ba3ff4" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.937234 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5njqz" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.939048 4814 generic.go:334] "Generic (PLEG): container finished" podID="66f44c43-4c03-4af5-bba9-849b3d9b8724" containerID="a97434e8625ec948c89558aa77ddcdab0e5522b78f456cd0b70e508e42562d1c" exitCode=0 Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.939142 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sgjff" event={"ID":"66f44c43-4c03-4af5-bba9-849b3d9b8724","Type":"ContainerDied","Data":"a97434e8625ec948c89558aa77ddcdab0e5522b78f456cd0b70e508e42562d1c"} Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.939168 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sgjff" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.939191 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sgjff" event={"ID":"66f44c43-4c03-4af5-bba9-849b3d9b8724","Type":"ContainerDied","Data":"8535d532da3376ec3c40b7171f345855ce3f4d30930de54fcee6c580720169ba"} Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.958886 4814 scope.go:117] "RemoveContainer" containerID="d38a263850f03e353005101c36cb66b945f53677072c3d01516fd301e4c1d376" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.984086 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5njqz"] Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.987344 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5njqz"] Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.988914 4814 scope.go:117] "RemoveContainer" containerID="ee697e25dbb0cd4f02234e9302a6ca548f9e8fc9c445f182362e9fa699256c55" Jan 22 05:22:05 crc kubenswrapper[4814]: I0122 05:22:05.989532 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66f44c43-4c03-4af5-bba9-849b3d9b8724-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66f44c43-4c03-4af5-bba9-849b3d9b8724" (UID: "66f44c43-4c03-4af5-bba9-849b3d9b8724"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.002453 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66f44c43-4c03-4af5-bba9-849b3d9b8724-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.002495 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66f44c43-4c03-4af5-bba9-849b3d9b8724-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.002518 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htkgt\" (UniqueName: \"kubernetes.io/projected/66f44c43-4c03-4af5-bba9-849b3d9b8724-kube-api-access-htkgt\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.005673 4814 scope.go:117] "RemoveContainer" containerID="9bf4a5bd1ea5f320ebd2c4e95dc57f37445039cbba94198a694c246b46ba3ff4" Jan 22 05:22:06 crc kubenswrapper[4814]: E0122 05:22:06.006154 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9bf4a5bd1ea5f320ebd2c4e95dc57f37445039cbba94198a694c246b46ba3ff4\": container with ID starting with 9bf4a5bd1ea5f320ebd2c4e95dc57f37445039cbba94198a694c246b46ba3ff4 not found: ID does not exist" containerID="9bf4a5bd1ea5f320ebd2c4e95dc57f37445039cbba94198a694c246b46ba3ff4" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.006204 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bf4a5bd1ea5f320ebd2c4e95dc57f37445039cbba94198a694c246b46ba3ff4"} err="failed to get container status \"9bf4a5bd1ea5f320ebd2c4e95dc57f37445039cbba94198a694c246b46ba3ff4\": rpc error: code = NotFound desc = could not find container \"9bf4a5bd1ea5f320ebd2c4e95dc57f37445039cbba94198a694c246b46ba3ff4\": container with ID starting with 9bf4a5bd1ea5f320ebd2c4e95dc57f37445039cbba94198a694c246b46ba3ff4 not found: ID does not exist" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.006283 4814 scope.go:117] "RemoveContainer" containerID="d38a263850f03e353005101c36cb66b945f53677072c3d01516fd301e4c1d376" Jan 22 05:22:06 crc kubenswrapper[4814]: E0122 05:22:06.006928 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d38a263850f03e353005101c36cb66b945f53677072c3d01516fd301e4c1d376\": container with ID starting with d38a263850f03e353005101c36cb66b945f53677072c3d01516fd301e4c1d376 not found: ID does not exist" containerID="d38a263850f03e353005101c36cb66b945f53677072c3d01516fd301e4c1d376" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.006970 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d38a263850f03e353005101c36cb66b945f53677072c3d01516fd301e4c1d376"} err="failed to get container status \"d38a263850f03e353005101c36cb66b945f53677072c3d01516fd301e4c1d376\": rpc error: code = NotFound desc = could not find container \"d38a263850f03e353005101c36cb66b945f53677072c3d01516fd301e4c1d376\": container with ID starting with d38a263850f03e353005101c36cb66b945f53677072c3d01516fd301e4c1d376 not found: ID does not exist" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.006998 4814 scope.go:117] "RemoveContainer" containerID="ee697e25dbb0cd4f02234e9302a6ca548f9e8fc9c445f182362e9fa699256c55" Jan 22 05:22:06 crc 
kubenswrapper[4814]: E0122 05:22:06.007506 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee697e25dbb0cd4f02234e9302a6ca548f9e8fc9c445f182362e9fa699256c55\": container with ID starting with ee697e25dbb0cd4f02234e9302a6ca548f9e8fc9c445f182362e9fa699256c55 not found: ID does not exist" containerID="ee697e25dbb0cd4f02234e9302a6ca548f9e8fc9c445f182362e9fa699256c55" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.007562 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee697e25dbb0cd4f02234e9302a6ca548f9e8fc9c445f182362e9fa699256c55"} err="failed to get container status \"ee697e25dbb0cd4f02234e9302a6ca548f9e8fc9c445f182362e9fa699256c55\": rpc error: code = NotFound desc = could not find container \"ee697e25dbb0cd4f02234e9302a6ca548f9e8fc9c445f182362e9fa699256c55\": container with ID starting with ee697e25dbb0cd4f02234e9302a6ca548f9e8fc9c445f182362e9fa699256c55 not found: ID does not exist" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.007593 4814 scope.go:117] "RemoveContainer" containerID="a97434e8625ec948c89558aa77ddcdab0e5522b78f456cd0b70e508e42562d1c" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.025083 4814 scope.go:117] "RemoveContainer" containerID="5c9efbb16297abe6b15073944c9f4f08813207274d504bb7c57b4c6ad5a8b3cf" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.039350 4814 scope.go:117] "RemoveContainer" containerID="42043afecbf439836499037a01a24e562ea6e2b11b73d3324bfdd27509196617" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.061483 4814 scope.go:117] "RemoveContainer" containerID="a97434e8625ec948c89558aa77ddcdab0e5522b78f456cd0b70e508e42562d1c" Jan 22 05:22:06 crc kubenswrapper[4814]: E0122 05:22:06.062143 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a97434e8625ec948c89558aa77ddcdab0e5522b78f456cd0b70e508e42562d1c\": container with ID starting with a97434e8625ec948c89558aa77ddcdab0e5522b78f456cd0b70e508e42562d1c not found: ID does not exist" containerID="a97434e8625ec948c89558aa77ddcdab0e5522b78f456cd0b70e508e42562d1c" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.062223 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a97434e8625ec948c89558aa77ddcdab0e5522b78f456cd0b70e508e42562d1c"} err="failed to get container status \"a97434e8625ec948c89558aa77ddcdab0e5522b78f456cd0b70e508e42562d1c\": rpc error: code = NotFound desc = could not find container \"a97434e8625ec948c89558aa77ddcdab0e5522b78f456cd0b70e508e42562d1c\": container with ID starting with a97434e8625ec948c89558aa77ddcdab0e5522b78f456cd0b70e508e42562d1c not found: ID does not exist" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.062285 4814 scope.go:117] "RemoveContainer" containerID="5c9efbb16297abe6b15073944c9f4f08813207274d504bb7c57b4c6ad5a8b3cf" Jan 22 05:22:06 crc kubenswrapper[4814]: E0122 05:22:06.063476 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c9efbb16297abe6b15073944c9f4f08813207274d504bb7c57b4c6ad5a8b3cf\": container with ID starting with 5c9efbb16297abe6b15073944c9f4f08813207274d504bb7c57b4c6ad5a8b3cf not found: ID does not exist" containerID="5c9efbb16297abe6b15073944c9f4f08813207274d504bb7c57b4c6ad5a8b3cf" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.063531 4814 pod_container_deletor.go:53] 
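
The E-level "ContainerStatus from runtime service failed ... NotFound" entries above look alarming but are benign cleanup noise: RemoveContainer has already succeeded, and when kubelet re-queries the container over CRI the runtime answers with gRPC code NotFound, which simply confirms the container is gone. The standard Go way to recognize that case from a CRI (or any gRPC) error, with the surrounding flow inferred from the log rather than quoted from kubelet source:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    func main() {
        // Stand-in for the error returned by the runtime's ContainerStatus
        // call after the container has already been removed.
        err := status.Error(codes.NotFound, `could not find container "9bf4a5bd..."`)

        if status.Code(err) == codes.NotFound {
            // Already gone: treat the removal as complete, as kubelet does.
            fmt.Println("container already removed, nothing to do")
            return
        }
        panic(err)
    }
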
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c9efbb16297abe6b15073944c9f4f08813207274d504bb7c57b4c6ad5a8b3cf"} err="failed to get container status \"5c9efbb16297abe6b15073944c9f4f08813207274d504bb7c57b4c6ad5a8b3cf\": rpc error: code = NotFound desc = could not find container \"5c9efbb16297abe6b15073944c9f4f08813207274d504bb7c57b4c6ad5a8b3cf\": container with ID starting with 5c9efbb16297abe6b15073944c9f4f08813207274d504bb7c57b4c6ad5a8b3cf not found: ID does not exist" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.063567 4814 scope.go:117] "RemoveContainer" containerID="42043afecbf439836499037a01a24e562ea6e2b11b73d3324bfdd27509196617" Jan 22 05:22:06 crc kubenswrapper[4814]: E0122 05:22:06.063970 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42043afecbf439836499037a01a24e562ea6e2b11b73d3324bfdd27509196617\": container with ID starting with 42043afecbf439836499037a01a24e562ea6e2b11b73d3324bfdd27509196617 not found: ID does not exist" containerID="42043afecbf439836499037a01a24e562ea6e2b11b73d3324bfdd27509196617" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.064020 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42043afecbf439836499037a01a24e562ea6e2b11b73d3324bfdd27509196617"} err="failed to get container status \"42043afecbf439836499037a01a24e562ea6e2b11b73d3324bfdd27509196617\": rpc error: code = NotFound desc = could not find container \"42043afecbf439836499037a01a24e562ea6e2b11b73d3324bfdd27509196617\": container with ID starting with 42043afecbf439836499037a01a24e562ea6e2b11b73d3324bfdd27509196617 not found: ID does not exist" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.271781 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sgjff"] Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.275083 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-sgjff"] Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.352104 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" path="/var/lib/kubelet/pods/46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f/volumes" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.353393 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66f44c43-4c03-4af5-bba9-849b3d9b8724" path="/var/lib/kubelet/pods/66f44c43-4c03-4af5-bba9-849b3d9b8724/volumes" Jan 22 05:22:06 crc kubenswrapper[4814]: I0122 05:22:06.845613 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" podUID="cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" containerName="oauth-openshift" containerID="cri-o://5a4bb12a4c537ee7ec6f23e94b659efd01a8cbfd1983c30c8896cb7ebe8180d8" gracePeriod=15 Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.247440 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.423841 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-provider-selection\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.423890 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-session\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.423912 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z48ms\" (UniqueName: \"kubernetes.io/projected/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-kube-api-access-z48ms\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.423933 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-ocp-branding-template\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.423975 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-service-ca\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.424011 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-audit-dir\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.424046 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-idp-0-file-data\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.424074 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-login\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.424095 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-cliconfig\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc 
kubenswrapper[4814]: I0122 05:22:07.424121 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-error\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.424147 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-trusted-ca-bundle\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.424166 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-router-certs\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.424193 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-audit-policies\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.424216 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-serving-cert\") pod \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\" (UID: \"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87\") " Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.425116 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.425253 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.425337 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.425407 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.427939 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.431211 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.432142 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-kube-api-access-z48ms" (OuterVolumeSpecName: "kube-api-access-z48ms") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "kube-api-access-z48ms". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.438931 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.439182 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.439961 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.440462 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.440797 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.440877 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.441898 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" (UID: "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526253 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526311 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526338 4814 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526371 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526394 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526415 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526437 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526464 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526488 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526508 4814 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526528 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526556 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526578 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z48ms\" (UniqueName: \"kubernetes.io/projected/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-kube-api-access-z48ms\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.526598 4814 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.958105 4814 generic.go:334] "Generic (PLEG): container finished" podID="cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" containerID="5a4bb12a4c537ee7ec6f23e94b659efd01a8cbfd1983c30c8896cb7ebe8180d8" exitCode=0 Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.958155 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" event={"ID":"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87","Type":"ContainerDied","Data":"5a4bb12a4c537ee7ec6f23e94b659efd01a8cbfd1983c30c8896cb7ebe8180d8"} Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.958185 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" event={"ID":"cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87","Type":"ContainerDied","Data":"ad1f90a981441c7c55a2b109b00946cd5204f1d5d0ed427365abe869d1e17af7"} Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.958191 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-mhktc" Jan 22 05:22:07 crc kubenswrapper[4814]: I0122 05:22:07.958206 4814 scope.go:117] "RemoveContainer" containerID="5a4bb12a4c537ee7ec6f23e94b659efd01a8cbfd1983c30c8896cb7ebe8180d8" Jan 22 05:22:08 crc kubenswrapper[4814]: I0122 05:22:07.995300 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-mhktc"] Jan 22 05:22:08 crc kubenswrapper[4814]: I0122 05:22:08.001490 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-mhktc"] Jan 22 05:22:08 crc kubenswrapper[4814]: I0122 05:22:08.028200 4814 scope.go:117] "RemoveContainer" containerID="5a4bb12a4c537ee7ec6f23e94b659efd01a8cbfd1983c30c8896cb7ebe8180d8" Jan 22 05:22:08 crc kubenswrapper[4814]: E0122 05:22:08.028648 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a4bb12a4c537ee7ec6f23e94b659efd01a8cbfd1983c30c8896cb7ebe8180d8\": container with ID starting with 5a4bb12a4c537ee7ec6f23e94b659efd01a8cbfd1983c30c8896cb7ebe8180d8 not found: ID does not exist" containerID="5a4bb12a4c537ee7ec6f23e94b659efd01a8cbfd1983c30c8896cb7ebe8180d8" Jan 22 05:22:08 crc kubenswrapper[4814]: I0122 05:22:08.028679 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a4bb12a4c537ee7ec6f23e94b659efd01a8cbfd1983c30c8896cb7ebe8180d8"} err="failed to get container status \"5a4bb12a4c537ee7ec6f23e94b659efd01a8cbfd1983c30c8896cb7ebe8180d8\": rpc error: code = NotFound desc = could not find container \"5a4bb12a4c537ee7ec6f23e94b659efd01a8cbfd1983c30c8896cb7ebe8180d8\": container with ID starting with 
5a4bb12a4c537ee7ec6f23e94b659efd01a8cbfd1983c30c8896cb7ebe8180d8 not found: ID does not exist" Jan 22 05:22:08 crc kubenswrapper[4814]: I0122 05:22:08.354943 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" path="/var/lib/kubelet/pods/cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87/volumes" Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.137718 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7tr77"] Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.138048 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7tr77" podUID="82fb31a6-1417-4ef9-a19b-1e877ce55477" containerName="registry-server" containerID="cri-o://7a09c7c3130299285d9ad1f601102927f1ef4c3a2dc50b72c06958dc1952c1cd" gracePeriod=2 Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.481372 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7tr77" Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.655161 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82fb31a6-1417-4ef9-a19b-1e877ce55477-utilities\") pod \"82fb31a6-1417-4ef9-a19b-1e877ce55477\" (UID: \"82fb31a6-1417-4ef9-a19b-1e877ce55477\") " Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.655231 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t4xtm\" (UniqueName: \"kubernetes.io/projected/82fb31a6-1417-4ef9-a19b-1e877ce55477-kube-api-access-t4xtm\") pod \"82fb31a6-1417-4ef9-a19b-1e877ce55477\" (UID: \"82fb31a6-1417-4ef9-a19b-1e877ce55477\") " Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.655262 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82fb31a6-1417-4ef9-a19b-1e877ce55477-catalog-content\") pod \"82fb31a6-1417-4ef9-a19b-1e877ce55477\" (UID: \"82fb31a6-1417-4ef9-a19b-1e877ce55477\") " Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.656197 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82fb31a6-1417-4ef9-a19b-1e877ce55477-utilities" (OuterVolumeSpecName: "utilities") pod "82fb31a6-1417-4ef9-a19b-1e877ce55477" (UID: "82fb31a6-1417-4ef9-a19b-1e877ce55477"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.670206 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82fb31a6-1417-4ef9-a19b-1e877ce55477-kube-api-access-t4xtm" (OuterVolumeSpecName: "kube-api-access-t4xtm") pod "82fb31a6-1417-4ef9-a19b-1e877ce55477" (UID: "82fb31a6-1417-4ef9-a19b-1e877ce55477"). InnerVolumeSpecName "kube-api-access-t4xtm". 
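
"Cleaned up orphaned pod volumes dir" (kubelet_volumes.go) is the final step of pod removal: only after every volume under /var/lib/kubelet/pods/<podUID>/volumes has been unmounted does kubelet delete the directory tree, so the UID stops appearing in orphaned-pod checks. A minimal sketch of that last sweep; the path layout matches the log, but kubelet performs additional safety checks (for lingering mount points and subpaths) before deleting anything:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func cleanupOrphanedPodDir(kubeletRoot, podUID string) error {
        volumesDir := filepath.Join(kubeletRoot, "pods", podUID, "volumes")
        // Safe only once all volume plugins report their mounts torn down.
        if err := os.RemoveAll(volumesDir); err != nil {
            return err
        }
        fmt.Printf("Cleaned up orphaned pod volumes dir path=%q\n", volumesDir)
        return nil
    }

    func main() {
        _ = cleanupOrphanedPodDir("/var/lib/kubelet", "cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87")
    }
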
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.756482 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82fb31a6-1417-4ef9-a19b-1e877ce55477-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.756520 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t4xtm\" (UniqueName: \"kubernetes.io/projected/82fb31a6-1417-4ef9-a19b-1e877ce55477-kube-api-access-t4xtm\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.779159 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82fb31a6-1417-4ef9-a19b-1e877ce55477-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "82fb31a6-1417-4ef9-a19b-1e877ce55477" (UID: "82fb31a6-1417-4ef9-a19b-1e877ce55477"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.857671 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82fb31a6-1417-4ef9-a19b-1e877ce55477-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.971907 4814 generic.go:334] "Generic (PLEG): container finished" podID="82fb31a6-1417-4ef9-a19b-1e877ce55477" containerID="7a09c7c3130299285d9ad1f601102927f1ef4c3a2dc50b72c06958dc1952c1cd" exitCode=0 Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.971956 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7tr77" event={"ID":"82fb31a6-1417-4ef9-a19b-1e877ce55477","Type":"ContainerDied","Data":"7a09c7c3130299285d9ad1f601102927f1ef4c3a2dc50b72c06958dc1952c1cd"} Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.971987 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7tr77" event={"ID":"82fb31a6-1417-4ef9-a19b-1e877ce55477","Type":"ContainerDied","Data":"7e21924bcd77ee29f5338c704c3020e49eb884fca6f602b4061cd23ff981462e"} Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.972009 4814 scope.go:117] "RemoveContainer" containerID="7a09c7c3130299285d9ad1f601102927f1ef4c3a2dc50b72c06958dc1952c1cd" Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.972122 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7tr77"
Jan 22 05:22:09 crc kubenswrapper[4814]: I0122 05:22:09.997492 4814 scope.go:117] "RemoveContainer" containerID="8998bf46da657c00f675f7b80881129e9cb7f4c1c40c0a24e69bec64e0ca5e4a"
Jan 22 05:22:10 crc kubenswrapper[4814]: I0122 05:22:10.014048 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7tr77"]
Jan 22 05:22:10 crc kubenswrapper[4814]: I0122 05:22:10.018134 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7tr77"]
Jan 22 05:22:10 crc kubenswrapper[4814]: I0122 05:22:10.019954 4814 scope.go:117] "RemoveContainer" containerID="8ba959aa7953a1572712a56d67e4926464d159f0c200fd1378998f61d00581d7"
Jan 22 05:22:10 crc kubenswrapper[4814]: I0122 05:22:10.034170 4814 scope.go:117] "RemoveContainer" containerID="7a09c7c3130299285d9ad1f601102927f1ef4c3a2dc50b72c06958dc1952c1cd"
Jan 22 05:22:10 crc kubenswrapper[4814]: E0122 05:22:10.034614 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a09c7c3130299285d9ad1f601102927f1ef4c3a2dc50b72c06958dc1952c1cd\": container with ID starting with 7a09c7c3130299285d9ad1f601102927f1ef4c3a2dc50b72c06958dc1952c1cd not found: ID does not exist" containerID="7a09c7c3130299285d9ad1f601102927f1ef4c3a2dc50b72c06958dc1952c1cd"
Jan 22 05:22:10 crc kubenswrapper[4814]: I0122 05:22:10.034677 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a09c7c3130299285d9ad1f601102927f1ef4c3a2dc50b72c06958dc1952c1cd"} err="failed to get container status \"7a09c7c3130299285d9ad1f601102927f1ef4c3a2dc50b72c06958dc1952c1cd\": rpc error: code = NotFound desc = could not find container \"7a09c7c3130299285d9ad1f601102927f1ef4c3a2dc50b72c06958dc1952c1cd\": container with ID starting with 7a09c7c3130299285d9ad1f601102927f1ef4c3a2dc50b72c06958dc1952c1cd not found: ID does not exist"
Jan 22 05:22:10 crc kubenswrapper[4814]: I0122 05:22:10.034703 4814 scope.go:117] "RemoveContainer" containerID="8998bf46da657c00f675f7b80881129e9cb7f4c1c40c0a24e69bec64e0ca5e4a"
Jan 22 05:22:10 crc kubenswrapper[4814]: E0122 05:22:10.034953 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8998bf46da657c00f675f7b80881129e9cb7f4c1c40c0a24e69bec64e0ca5e4a\": container with ID starting with 8998bf46da657c00f675f7b80881129e9cb7f4c1c40c0a24e69bec64e0ca5e4a not found: ID does not exist" containerID="8998bf46da657c00f675f7b80881129e9cb7f4c1c40c0a24e69bec64e0ca5e4a"
Jan 22 05:22:10 crc kubenswrapper[4814]: I0122 05:22:10.034988 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8998bf46da657c00f675f7b80881129e9cb7f4c1c40c0a24e69bec64e0ca5e4a"} err="failed to get container status \"8998bf46da657c00f675f7b80881129e9cb7f4c1c40c0a24e69bec64e0ca5e4a\": rpc error: code = NotFound desc = could not find container \"8998bf46da657c00f675f7b80881129e9cb7f4c1c40c0a24e69bec64e0ca5e4a\": container with ID starting with 8998bf46da657c00f675f7b80881129e9cb7f4c1c40c0a24e69bec64e0ca5e4a not found: ID does not exist"
Jan 22 05:22:10 crc kubenswrapper[4814]: I0122 05:22:10.035009 4814 scope.go:117] "RemoveContainer" containerID="8ba959aa7953a1572712a56d67e4926464d159f0c200fd1378998f61d00581d7"
Jan 22 05:22:10 crc kubenswrapper[4814]: E0122 05:22:10.035227 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ba959aa7953a1572712a56d67e4926464d159f0c200fd1378998f61d00581d7\": container with ID starting with 8ba959aa7953a1572712a56d67e4926464d159f0c200fd1378998f61d00581d7 not found: ID does not exist" containerID="8ba959aa7953a1572712a56d67e4926464d159f0c200fd1378998f61d00581d7"
Jan 22 05:22:10 crc kubenswrapper[4814]: I0122 05:22:10.035254 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ba959aa7953a1572712a56d67e4926464d159f0c200fd1378998f61d00581d7"} err="failed to get container status \"8ba959aa7953a1572712a56d67e4926464d159f0c200fd1378998f61d00581d7\": rpc error: code = NotFound desc = could not find container \"8ba959aa7953a1572712a56d67e4926464d159f0c200fd1378998f61d00581d7\": container with ID starting with 8ba959aa7953a1572712a56d67e4926464d159f0c200fd1378998f61d00581d7 not found: ID does not exist"
Jan 22 05:22:10 crc kubenswrapper[4814]: I0122 05:22:10.349923 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82fb31a6-1417-4ef9-a19b-1e877ce55477" path="/var/lib/kubelet/pods/82fb31a6-1417-4ef9-a19b-1e877ce55477/volumes"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.373076 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-7c89776f78-5xbp4"]
Jan 22 05:22:14 crc kubenswrapper[4814]: E0122 05:22:14.373726 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" containerName="oauth-openshift"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.373748 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" containerName="oauth-openshift"
Jan 22 05:22:14 crc kubenswrapper[4814]: E0122 05:22:14.373767 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66f44c43-4c03-4af5-bba9-849b3d9b8724" containerName="extract-utilities"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.373781 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="66f44c43-4c03-4af5-bba9-849b3d9b8724" containerName="extract-utilities"
Jan 22 05:22:14 crc kubenswrapper[4814]: E0122 05:22:14.373803 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82fb31a6-1417-4ef9-a19b-1e877ce55477" containerName="extract-utilities"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.373815 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="82fb31a6-1417-4ef9-a19b-1e877ce55477" containerName="extract-utilities"
Jan 22 05:22:14 crc kubenswrapper[4814]: E0122 05:22:14.373837 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" containerName="extract-utilities"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.373850 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" containerName="extract-utilities"
Jan 22 05:22:14 crc kubenswrapper[4814]: E0122 05:22:14.373863 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66f44c43-4c03-4af5-bba9-849b3d9b8724" containerName="registry-server"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.373874 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="66f44c43-4c03-4af5-bba9-849b3d9b8724" containerName="registry-server"
Jan 22 05:22:14 crc kubenswrapper[4814]: E0122 05:22:14.373894 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b42b8af-781f-4a40-bc43-658de04b12a6" containerName="extract-content"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.373906 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b42b8af-781f-4a40-bc43-658de04b12a6" containerName="extract-content"
Jan 22 05:22:14 crc kubenswrapper[4814]: E0122 05:22:14.373921 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" containerName="extract-content"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.373934 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" containerName="extract-content"
Jan 22 05:22:14 crc kubenswrapper[4814]: E0122 05:22:14.373948 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82fb31a6-1417-4ef9-a19b-1e877ce55477" containerName="extract-content"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.373960 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="82fb31a6-1417-4ef9-a19b-1e877ce55477" containerName="extract-content"
Jan 22 05:22:14 crc kubenswrapper[4814]: E0122 05:22:14.373980 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66f44c43-4c03-4af5-bba9-849b3d9b8724" containerName="extract-content"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.373991 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="66f44c43-4c03-4af5-bba9-849b3d9b8724" containerName="extract-content"
Jan 22 05:22:14 crc kubenswrapper[4814]: E0122 05:22:14.374007 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" containerName="registry-server"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.374018 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" containerName="registry-server"
Jan 22 05:22:14 crc kubenswrapper[4814]: E0122 05:22:14.374035 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b42b8af-781f-4a40-bc43-658de04b12a6" containerName="extract-utilities"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.374047 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b42b8af-781f-4a40-bc43-658de04b12a6" containerName="extract-utilities"
Jan 22 05:22:14 crc kubenswrapper[4814]: E0122 05:22:14.374064 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82fb31a6-1417-4ef9-a19b-1e877ce55477" containerName="registry-server"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.374077 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="82fb31a6-1417-4ef9-a19b-1e877ce55477" containerName="registry-server"
Jan 22 05:22:14 crc kubenswrapper[4814]: E0122 05:22:14.374093 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b42b8af-781f-4a40-bc43-658de04b12a6" containerName="registry-server"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.374105 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b42b8af-781f-4a40-bc43-658de04b12a6" containerName="registry-server"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.374261 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="66f44c43-4c03-4af5-bba9-849b3d9b8724" containerName="registry-server"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.374279 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb7bd6f7-a4d4-4bab-9b79-dcd19c3f4d87" containerName="oauth-openshift"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.374306 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b42b8af-781f-4a40-bc43-658de04b12a6" containerName="registry-server"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.374321 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="46bd2ee7-6595-4d80-b6d8-f53bfef2ee2f" containerName="registry-server"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.374337 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="82fb31a6-1417-4ef9-a19b-1e877ce55477" containerName="registry-server"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.374915 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.377466 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.382081 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.382483 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.382510 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.383229 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.383289 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.383826 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.384116 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.384497 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.388673 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.388967 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.389208 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.403619 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.406357 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7c89776f78-5xbp4"]
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.412842 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.421476 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.516859 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.516988 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.517040 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/eb4bca64-45a7-46a4-b463-619b090c8e41-audit-dir\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.517141 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsxxk\" (UniqueName: \"kubernetes.io/projected/eb4bca64-45a7-46a4-b463-619b090c8e41-kube-api-access-rsxxk\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.517179 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.517218 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-router-certs\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.517255 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.517326 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-service-ca\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.517364 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-user-template-login\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.517400 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-session\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.517468 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-user-template-error\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.517503 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/eb4bca64-45a7-46a4-b463-619b090c8e41-audit-policies\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.517539 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.517576 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.618437 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/eb4bca64-45a7-46a4-b463-619b090c8e41-audit-dir\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.618809 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsxxk\" (UniqueName: \"kubernetes.io/projected/eb4bca64-45a7-46a4-b463-619b090c8e41-kube-api-access-rsxxk\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.618918 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.619199 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-router-certs\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.619318 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.618697 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/eb4bca64-45a7-46a4-b463-619b090c8e41-audit-dir\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.619586 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-service-ca\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.621099 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-user-template-login\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.621229 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-session\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.621348 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-user-template-error\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.621442 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/eb4bca64-45a7-46a4-b463-619b090c8e41-audit-policies\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.622261 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.622389 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.622505 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.622609 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.622973 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/eb4bca64-45a7-46a4-b463-619b090c8e41-audit-policies\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.624249 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.621540 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-service-ca\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.627064 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-user-template-login\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.628085 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-session\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.628140 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.628757 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.637176 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.637709 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.637763 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-user-template-error\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.638710 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-router-certs\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.638926 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/eb4bca64-45a7-46a4-b463-619b090c8e41-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.642092 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsxxk\" (UniqueName: \"kubernetes.io/projected/eb4bca64-45a7-46a4-b463-619b090c8e41-kube-api-access-rsxxk\") pod \"oauth-openshift-7c89776f78-5xbp4\" (UID: \"eb4bca64-45a7-46a4-b463-619b090c8e41\") " pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.711742 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:14 crc kubenswrapper[4814]: I0122 05:22:14.966549 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7c89776f78-5xbp4"]
Jan 22 05:22:14 crc kubenswrapper[4814]: W0122 05:22:14.975708 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb4bca64_45a7_46a4_b463_619b090c8e41.slice/crio-1f45334da73a3b2b0bb776966bbb9d8cbfd6000fe175a19135cf3f5364ed2e73 WatchSource:0}: Error finding container 1f45334da73a3b2b0bb776966bbb9d8cbfd6000fe175a19135cf3f5364ed2e73: Status 404 returned error can't find the container with id 1f45334da73a3b2b0bb776966bbb9d8cbfd6000fe175a19135cf3f5364ed2e73
Jan 22 05:22:15 crc kubenswrapper[4814]: I0122 05:22:15.008279 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4" event={"ID":"eb4bca64-45a7-46a4-b463-619b090c8e41","Type":"ContainerStarted","Data":"1f45334da73a3b2b0bb776966bbb9d8cbfd6000fe175a19135cf3f5364ed2e73"}
Jan 22 05:22:16 crc kubenswrapper[4814]: I0122 05:22:16.016435 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4" event={"ID":"eb4bca64-45a7-46a4-b463-619b090c8e41","Type":"ContainerStarted","Data":"aba1eed6413f3b3ad76acfdd93a9749eff004b119097b7b7dfb55f60d26e42c6"}
Jan 22 05:22:16 crc kubenswrapper[4814]: I0122 05:22:16.017008 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:16 crc kubenswrapper[4814]: I0122 05:22:16.025105 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4"
Jan 22 05:22:16 crc kubenswrapper[4814]: I0122 05:22:16.095825 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7c89776f78-5xbp4" podStartSLOduration=35.095790258 podStartE2EDuration="35.095790258s" podCreationTimestamp="2026-01-22 05:21:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:22:16.050158045 +0000 UTC m=+222.133646350" watchObservedRunningTime="2026-01-22 05:22:16.095790258 +0000 UTC m=+222.179278523"
Jan 22 05:22:19 crc kubenswrapper[4814]: I0122 05:22:19.614103 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 05:22:19 crc kubenswrapper[4814]: I0122 05:22:19.614449 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 05:22:19 crc kubenswrapper[4814]: I0122 05:22:19.614526 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg"
Jan 22 05:22:19 crc kubenswrapper[4814]: I0122 05:22:19.615404 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 05:22:19 crc kubenswrapper[4814]: I0122 05:22:19.615512 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711" gracePeriod=600
Jan 22 05:22:20 crc kubenswrapper[4814]: I0122 05:22:20.054105 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711" exitCode=0
Jan 22 05:22:20 crc kubenswrapper[4814]: I0122 05:22:20.054229 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711"}
Jan 22 05:22:20 crc kubenswrapper[4814]: I0122 05:22:20.054460 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"f8c5987ebbafcee5f7525abf6e4789b335512d5dc41a68223adfc3fcea787722"}
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.804721 4814 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.805973 4814 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.806206 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc" gracePeriod=15
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.806342 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.806673 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b" gracePeriod=15
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.806745 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22" gracePeriod=15
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.806777 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445" gracePeriod=15
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.806806 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8" gracePeriod=15
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.807742 4814 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 22 05:22:27 crc kubenswrapper[4814]: E0122 05:22:27.807873 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.807883 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Jan 22 05:22:27 crc kubenswrapper[4814]: E0122 05:22:27.807896 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.807901 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Jan 22 05:22:27 crc kubenswrapper[4814]: E0122 05:22:27.807913 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.807918 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 22 05:22:27 crc kubenswrapper[4814]: E0122 05:22:27.807928 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.807933 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Jan 22 05:22:27 crc kubenswrapper[4814]: E0122 05:22:27.807942 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.807948 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 22 05:22:27 crc kubenswrapper[4814]: E0122 05:22:27.807955 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.807961 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Jan 22 05:22:27 crc kubenswrapper[4814]: E0122 05:22:27.807969 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.807974 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.808060 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.808070 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.808118 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.808127 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.808135 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.808145 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.858083 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.903857 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.904305 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.904348 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.904364 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.904381 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.904400 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.904421 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:27 crc kubenswrapper[4814]: I0122 05:22:27.904453 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005304 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005411 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005434 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005454 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005474 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005510 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005530 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005553 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005595 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005385 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005661 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005687 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005707 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005728 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005749 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.005778 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.100070 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.101545 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.102691 4814 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b" exitCode=0
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.102719 4814 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22" exitCode=0
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.102727 4814 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445" exitCode=0
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.102734 4814 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8" exitCode=2
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.102872 4814 scope.go:117] "RemoveContainer" containerID="5fee79ea7b70be3bd7c55ed45956532c65dc31d4cbd153c3f932738444f2b832"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.113850 4814 generic.go:334] "Generic (PLEG): container finished" podID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" containerID="ff80441962ec0b35ad0e78a93adb8db482d2846894b4bbfa997d0d71e4365b50" exitCode=0
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.113890 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71","Type":"ContainerDied","Data":"ff80441962ec0b35ad0e78a93adb8db482d2846894b4bbfa997d0d71e4365b50"}
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.114674 4814 status_manager.go:851] "Failed to get status for pod" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.115121 4814 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.115455 4814 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 22 05:22:28 crc kubenswrapper[4814]: I0122 05:22:28.159945 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 22 05:22:28 crc kubenswrapper[4814]: E0122 05:22:28.182293 4814 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.110:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188cf61ab2f3062b openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-22 05:22:28.181616171 +0000 UTC m=+234.265104386,LastTimestamp:2026-01-22 05:22:28.181616171 +0000 UTC m=+234.265104386,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.122987 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.126582 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"1af7c131b7081fefdd54ac0e5eb0e197160388f7f56944c749102875e34f02ea"}
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.126653 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"6ad91ac0890b2782a0bd72925546c67c8920cb99624e3aa2c5b2dd648621af78"}
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.127710 4814 status_manager.go:851] "Failed to get status for pod" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.128170 4814 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.374529 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.375505 4814 status_manager.go:851] "Failed to get status for pod" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.375835 4814 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.556934 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-kube-api-access\") pod \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\" (UID: \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\") "
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.557005 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-kubelet-dir\") pod \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\" (UID: \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\") "
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.557243 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-var-lock\") pod \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\" (UID: \"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71\") "
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.557335 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" (UID: "3a84f2d9-eeed-4106-8d46-11e6e0bf5f71"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.557519 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-var-lock" (OuterVolumeSpecName: "var-lock") pod "3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" (UID: "3a84f2d9-eeed-4106-8d46-11e6e0bf5f71"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.557613 4814 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-var-lock\") on node \"crc\" DevicePath \"\""
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.557650 4814 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.565331 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" (UID: "3a84f2d9-eeed-4106-8d46-11e6e0bf5f71"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:22:29 crc kubenswrapper[4814]: I0122 05:22:29.658446 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3a84f2d9-eeed-4106-8d46-11e6e0bf5f71-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.132814 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.132813 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"3a84f2d9-eeed-4106-8d46-11e6e0bf5f71","Type":"ContainerDied","Data":"3f6e13ff835e0dbaef126cd34235bbe5bb55b2a4ecfbf7b396b1e6954ab7e240"}
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.134379 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f6e13ff835e0dbaef126cd34235bbe5bb55b2a4ecfbf7b396b1e6954ab7e240"
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.145884 4814 status_manager.go:851] "Failed to get status for pod" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.146245 4814 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.277320 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.278216 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.278803 4814 status_manager.go:851] "Failed to get status for pod" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.279171 4814 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.279480 4814 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.470014 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.470054 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.470072 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.470358 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.470393 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.470393 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.571677 4814 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\""
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.571713 4814 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\""
Jan 22 05:22:30 crc kubenswrapper[4814]: I0122 05:22:30.571730 4814 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\""
Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.142712 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.143788 4814 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc" exitCode=0
Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.143883 4814 scope.go:117] "RemoveContainer" containerID="fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b"
Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.144097 4814 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.145039 4814 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.145199 4814 status_manager.go:851] "Failed to get status for pod" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.145340 4814 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.166245 4814 scope.go:117] "RemoveContainer" containerID="0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.176379 4814 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.176615 4814 status_manager.go:851] "Failed to get status for pod" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.176787 4814 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.208271 4814 scope.go:117] "RemoveContainer" containerID="5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.222740 4814 scope.go:117] "RemoveContainer" containerID="2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.241284 4814 scope.go:117] "RemoveContainer" containerID="83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.261352 4814 scope.go:117] "RemoveContainer" containerID="db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.284505 4814 scope.go:117] "RemoveContainer" containerID="fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b" Jan 22 05:22:31 crc 
kubenswrapper[4814]: E0122 05:22:31.285007 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\": container with ID starting with fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b not found: ID does not exist" containerID="fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.285049 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b"} err="failed to get container status \"fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\": rpc error: code = NotFound desc = could not find container \"fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b\": container with ID starting with fff5c7246639a849227ee490bfa58ad64f5d38f20a6b5c497db0835896df210b not found: ID does not exist" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.285077 4814 scope.go:117] "RemoveContainer" containerID="0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22" Jan 22 05:22:31 crc kubenswrapper[4814]: E0122 05:22:31.285264 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\": container with ID starting with 0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22 not found: ID does not exist" containerID="0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.285290 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22"} err="failed to get container status \"0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\": rpc error: code = NotFound desc = could not find container \"0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22\": container with ID starting with 0861107b430ee0b1b4b6b96a3d9d1d9629f333290aa5dcbb4c00c249e56aae22 not found: ID does not exist" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.285304 4814 scope.go:117] "RemoveContainer" containerID="5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445" Jan 22 05:22:31 crc kubenswrapper[4814]: E0122 05:22:31.285469 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\": container with ID starting with 5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445 not found: ID does not exist" containerID="5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.285488 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445"} err="failed to get container status \"5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\": rpc error: code = NotFound desc = could not find container \"5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445\": container with ID starting with 5f1e1345e476254e28507b964df1e4fb4bbbacef7d3b0b33604b9f511d294445 not found: ID does not exist" Jan 22 05:22:31 crc kubenswrapper[4814]: 
I0122 05:22:31.285502 4814 scope.go:117] "RemoveContainer" containerID="2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8" Jan 22 05:22:31 crc kubenswrapper[4814]: E0122 05:22:31.285772 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\": container with ID starting with 2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8 not found: ID does not exist" containerID="2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.285827 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8"} err="failed to get container status \"2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\": rpc error: code = NotFound desc = could not find container \"2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8\": container with ID starting with 2f93d263e13148cb6ec0d4062eed92155c1ef2851698ad99fdc66edbbb0a2ff8 not found: ID does not exist" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.285857 4814 scope.go:117] "RemoveContainer" containerID="83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc" Jan 22 05:22:31 crc kubenswrapper[4814]: E0122 05:22:31.286107 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\": container with ID starting with 83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc not found: ID does not exist" containerID="83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.286130 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc"} err="failed to get container status \"83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\": rpc error: code = NotFound desc = could not find container \"83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc\": container with ID starting with 83a4482b1edb4de732f7b991efdb7c906f54d0a5c794c8c136037d6d930466fc not found: ID does not exist" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.286154 4814 scope.go:117] "RemoveContainer" containerID="db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290" Jan 22 05:22:31 crc kubenswrapper[4814]: E0122 05:22:31.286357 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\": container with ID starting with db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290 not found: ID does not exist" containerID="db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290" Jan 22 05:22:31 crc kubenswrapper[4814]: I0122 05:22:31.286374 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290"} err="failed to get container status \"db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\": rpc error: code = NotFound desc = could not find container \"db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290\": container 
with ID starting with db6d9861f6e601339c9a03554a088e32b76e1776fa485ebba1409b2f59895290 not found: ID does not exist" Jan 22 05:22:32 crc kubenswrapper[4814]: I0122 05:22:32.355984 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 22 05:22:33 crc kubenswrapper[4814]: E0122 05:22:33.418555 4814 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.110:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" volumeName="registry-storage" Jan 22 05:22:33 crc kubenswrapper[4814]: E0122 05:22:33.624460 4814 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:33 crc kubenswrapper[4814]: E0122 05:22:33.624721 4814 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:33 crc kubenswrapper[4814]: E0122 05:22:33.625036 4814 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:33 crc kubenswrapper[4814]: E0122 05:22:33.625310 4814 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:33 crc kubenswrapper[4814]: E0122 05:22:33.625521 4814 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:33 crc kubenswrapper[4814]: I0122 05:22:33.625547 4814 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 22 05:22:33 crc kubenswrapper[4814]: E0122 05:22:33.625785 4814 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="200ms" Jan 22 05:22:33 crc kubenswrapper[4814]: E0122 05:22:33.827306 4814 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="400ms" Jan 22 05:22:34 crc kubenswrapper[4814]: E0122 05:22:34.227849 4814 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" 
interval="800ms" Jan 22 05:22:34 crc kubenswrapper[4814]: I0122 05:22:34.345755 4814 status_manager.go:851] "Failed to get status for pod" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:34 crc kubenswrapper[4814]: I0122 05:22:34.346560 4814 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:35 crc kubenswrapper[4814]: E0122 05:22:35.029196 4814 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="1.6s" Jan 22 05:22:36 crc kubenswrapper[4814]: E0122 05:22:36.629986 4814 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="3.2s" Jan 22 05:22:37 crc kubenswrapper[4814]: E0122 05:22:37.229435 4814 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.110:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188cf61ab2f3062b openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-22 05:22:28.181616171 +0000 UTC m=+234.265104386,LastTimestamp:2026-01-22 05:22:28.181616171 +0000 UTC m=+234.265104386,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 22 05:22:39 crc kubenswrapper[4814]: E0122 05:22:39.831310 4814 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="6.4s" Jan 22 05:22:41 crc kubenswrapper[4814]: I0122 05:22:41.405444 4814 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Readiness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 22 05:22:41 crc kubenswrapper[4814]: I0122 05:22:41.405525 4814 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.212551 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.212703 4814 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5" exitCode=1 Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.212760 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5"} Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.213459 4814 scope.go:117] "RemoveContainer" containerID="78498ef7683ed1974dfbd05b899a6bdfda146b7efeb38001cad10d1e3ce03ec5" Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.213908 4814 status_manager.go:851] "Failed to get status for pod" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.214495 4814 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.215891 4814 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.345549 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.347318 4814 status_manager.go:851] "Failed to get status for pod" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.347744 4814 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.347956 4814 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.366464 4814 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="35389f4a-a247-4636-9091-1e9057355d49" Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.366500 4814 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="35389f4a-a247-4636-9091-1e9057355d49" Jan 22 05:22:42 crc kubenswrapper[4814]: E0122 05:22:42.367846 4814 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:22:42 crc kubenswrapper[4814]: I0122 05:22:42.368524 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:22:42 crc kubenswrapper[4814]: W0122 05:22:42.408886 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-a694fed6d1be44e71e8000d51c8749f01f2633a3bb4777bcd2cdcc2bb44bc0eb WatchSource:0}: Error finding container a694fed6d1be44e71e8000d51c8749f01f2633a3bb4777bcd2cdcc2bb44bc0eb: Status 404 returned error can't find the container with id a694fed6d1be44e71e8000d51c8749f01f2633a3bb4777bcd2cdcc2bb44bc0eb Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.228387 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.229201 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"093681634a88364cc81df5b99ef760ccd5dafec4ea3119640d1a22db03282fe5"} Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.231334 4814 status_manager.go:851] "Failed to get status for pod" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.232019 4814 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.232545 4814 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.232771 4814 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="ea95aef224fff5fe2d6f55a880c4f28a67535f1f0191e66d96ce0f57e9312aaa" exitCode=0 Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.232810 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"ea95aef224fff5fe2d6f55a880c4f28a67535f1f0191e66d96ce0f57e9312aaa"} Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.232837 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"a694fed6d1be44e71e8000d51c8749f01f2633a3bb4777bcd2cdcc2bb44bc0eb"} Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.233316 4814 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="35389f4a-a247-4636-9091-1e9057355d49" Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 
05:22:43.233336 4814 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="35389f4a-a247-4636-9091-1e9057355d49" Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.233777 4814 status_manager.go:851] "Failed to get status for pod" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:43 crc kubenswrapper[4814]: E0122 05:22:43.233818 4814 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.233994 4814 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.234216 4814 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.877379 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:22:43 crc kubenswrapper[4814]: I0122 05:22:43.887861 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:22:44 crc kubenswrapper[4814]: I0122 05:22:44.243988 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3532b0449d37c87cff141258da2ae872a805cae0096a2c107bec8d824c0b8977"} Jan 22 05:22:44 crc kubenswrapper[4814]: I0122 05:22:44.244690 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:22:44 crc kubenswrapper[4814]: I0122 05:22:44.244723 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"84b69832efa95669564e3abe413abd8a586fbff52e55646b29d44cb340d023e8"} Jan 22 05:22:44 crc kubenswrapper[4814]: I0122 05:22:44.244743 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"72d3c1d00cc0f9bc4ee4fcf388913b652a71d0a31020bbc34e56e09f6006ab44"} Jan 22 05:22:45 crc kubenswrapper[4814]: I0122 05:22:45.259762 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"77fa919fa5d43854ff0525f8a8f000276c0b603472b80602fd5ae815fb32a1af"} Jan 22 05:22:45 crc kubenswrapper[4814]: I0122 05:22:45.259822 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"381cea1626899ddfdc75354331fef2fd601724e07ea264ca45bb2bd4d7dc7997"} Jan 22 05:22:45 crc kubenswrapper[4814]: I0122 05:22:45.260294 4814 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="35389f4a-a247-4636-9091-1e9057355d49" Jan 22 05:22:45 crc kubenswrapper[4814]: I0122 05:22:45.260320 4814 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="35389f4a-a247-4636-9091-1e9057355d49" Jan 22 05:22:47 crc kubenswrapper[4814]: I0122 05:22:47.369534 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:22:47 crc kubenswrapper[4814]: I0122 05:22:47.369920 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:22:47 crc kubenswrapper[4814]: I0122 05:22:47.376963 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:22:50 crc kubenswrapper[4814]: I0122 05:22:50.291080 4814 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:22:50 crc kubenswrapper[4814]: I0122 05:22:50.407482 4814 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="44babb26-5744-4df1-b48e-157b52f14c6b" Jan 22 05:22:51 crc kubenswrapper[4814]: I0122 05:22:51.300240 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:22:51 crc kubenswrapper[4814]: I0122 05:22:51.300492 4814 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="35389f4a-a247-4636-9091-1e9057355d49" Jan 22 05:22:51 crc kubenswrapper[4814]: I0122 05:22:51.300524 4814 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="35389f4a-a247-4636-9091-1e9057355d49" Jan 22 05:22:51 crc kubenswrapper[4814]: I0122 05:22:51.306415 4814 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="44babb26-5744-4df1-b48e-157b52f14c6b" Jan 22 05:22:52 crc kubenswrapper[4814]: I0122 05:22:52.306523 4814 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="35389f4a-a247-4636-9091-1e9057355d49" Jan 22 05:22:52 crc kubenswrapper[4814]: I0122 05:22:52.306567 4814 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="35389f4a-a247-4636-9091-1e9057355d49" Jan 22 05:22:52 crc kubenswrapper[4814]: I0122 05:22:52.311029 4814 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="44babb26-5744-4df1-b48e-157b52f14c6b" Jan 22 05:22:52 crc kubenswrapper[4814]: I0122 
05:22:52.314537 4814 status_manager.go:308] "Container readiness changed before pod has synced" pod="openshift-kube-apiserver/kube-apiserver-crc" containerID="cri-o://72d3c1d00cc0f9bc4ee4fcf388913b652a71d0a31020bbc34e56e09f6006ab44" Jan 22 05:22:52 crc kubenswrapper[4814]: I0122 05:22:52.314587 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:22:53 crc kubenswrapper[4814]: I0122 05:22:53.312942 4814 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="35389f4a-a247-4636-9091-1e9057355d49" Jan 22 05:22:53 crc kubenswrapper[4814]: I0122 05:22:53.313200 4814 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="35389f4a-a247-4636-9091-1e9057355d49" Jan 22 05:22:53 crc kubenswrapper[4814]: I0122 05:22:53.317409 4814 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="44babb26-5744-4df1-b48e-157b52f14c6b" Jan 22 05:22:59 crc kubenswrapper[4814]: I0122 05:22:59.703691 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 22 05:23:00 crc kubenswrapper[4814]: I0122 05:23:00.272329 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 22 05:23:00 crc kubenswrapper[4814]: I0122 05:23:00.727178 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 22 05:23:00 crc kubenswrapper[4814]: I0122 05:23:00.834570 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 22 05:23:00 crc kubenswrapper[4814]: I0122 05:23:00.974720 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 22 05:23:01 crc kubenswrapper[4814]: I0122 05:23:01.059440 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 22 05:23:01 crc kubenswrapper[4814]: I0122 05:23:01.148541 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 22 05:23:01 crc kubenswrapper[4814]: I0122 05:23:01.195083 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 22 05:23:01 crc kubenswrapper[4814]: I0122 05:23:01.202325 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 22 05:23:01 crc kubenswrapper[4814]: I0122 05:23:01.235392 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 22 05:23:01 crc kubenswrapper[4814]: I0122 05:23:01.300913 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 22 05:23:01 crc kubenswrapper[4814]: I0122 05:23:01.410616 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 22 05:23:01 crc kubenswrapper[4814]: I0122 05:23:01.558996 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 22 
05:23:01 crc kubenswrapper[4814]: I0122 05:23:01.579970 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 22 05:23:01 crc kubenswrapper[4814]: I0122 05:23:01.665009 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 22 05:23:01 crc kubenswrapper[4814]: I0122 05:23:01.744668 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 22 05:23:01 crc kubenswrapper[4814]: I0122 05:23:01.747995 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 22 05:23:01 crc kubenswrapper[4814]: I0122 05:23:01.868931 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 22 05:23:02 crc kubenswrapper[4814]: I0122 05:23:02.088400 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 22 05:23:02 crc kubenswrapper[4814]: I0122 05:23:02.134166 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 22 05:23:02 crc kubenswrapper[4814]: I0122 05:23:02.226944 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 22 05:23:02 crc kubenswrapper[4814]: I0122 05:23:02.287653 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 22 05:23:02 crc kubenswrapper[4814]: I0122 05:23:02.403018 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 22 05:23:02 crc kubenswrapper[4814]: I0122 05:23:02.622889 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 22 05:23:02 crc kubenswrapper[4814]: I0122 05:23:02.694495 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 22 05:23:02 crc kubenswrapper[4814]: I0122 05:23:02.860922 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 22 05:23:02 crc kubenswrapper[4814]: I0122 05:23:02.879163 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 22 05:23:02 crc kubenswrapper[4814]: I0122 05:23:02.942049 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.076616 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.090428 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.148430 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.188820 4814 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-service-ca"/"kube-root-ca.crt" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.206767 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.242942 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.327507 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.489815 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.525373 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.553121 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.555215 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.663969 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.717243 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.817316 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 22 05:23:03 crc kubenswrapper[4814]: I0122 05:23:03.847852 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.082693 4814 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.092665 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=37.092602908 podStartE2EDuration="37.092602908s" podCreationTimestamp="2026-01-22 05:22:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:22:50.393619258 +0000 UTC m=+256.477107473" watchObservedRunningTime="2026-01-22 05:23:04.092602908 +0000 UTC m=+270.176091163" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.094202 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.094340 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.103497 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.126678 4814 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=14.12661366 podStartE2EDuration="14.12661366s" podCreationTimestamp="2026-01-22 05:22:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:23:04.122197924 +0000 UTC m=+270.205686189" watchObservedRunningTime="2026-01-22 05:23:04.12661366 +0000 UTC m=+270.210101885" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.207865 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.292800 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.295085 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.341793 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.385397 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.485556 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.575881 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.576165 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.660269 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.779288 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 22 05:23:04 crc kubenswrapper[4814]: I0122 05:23:04.782163 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.026317 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.045765 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.100191 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.102329 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.140502 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.184816 4814 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.293216 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.320234 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.333704 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.347614 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.382532 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.392293 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.398276 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.414351 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.489267 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.550880 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.552208 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.610125 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.739141 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.777410 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.847141 4814 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.917412 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.950545 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 05:23:05.992879 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 22 05:23:05 crc kubenswrapper[4814]: I0122 
05:23:05.997519 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.065410 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.151822 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.152535 4814 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.257817 4814 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.324102 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.473135 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.509343 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.559084 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.642317 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.727678 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.783190 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.785934 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.848222 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.867717 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.871224 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.932950 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.943503 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.964723 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 22 05:23:06 crc kubenswrapper[4814]: I0122 05:23:06.974570 4814 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.016046 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.060775 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.064467 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.086730 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.096772 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.156248 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.200202 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.334189 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.418116 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.470299 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.479577 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.530050 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.533601 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.674563 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.764286 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.962106 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 22 05:23:07 crc kubenswrapper[4814]: I0122 05:23:07.985329 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.009960 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.105369 4814 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.136867 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.284049 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.313722 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.323461 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.367662 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.578963 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.653299 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.653513 4814 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.742158 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.746951 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.805359 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.807601 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 22 05:23:08 crc kubenswrapper[4814]: I0122 05:23:08.914208 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.039423 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.091371 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.098222 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.098723 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.099724 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.138828 4814 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.219719 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.333861 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.369462 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.425818 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.446609 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.481060 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.488940 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.503235 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.597367 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.620187 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.621872 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.682883 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.733883 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.932777 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 22 05:23:09 crc kubenswrapper[4814]: I0122 05:23:09.984894 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.011227 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.026110 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.032867 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.268756 4814 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.325906 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.415452 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.537741 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.616520 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.625599 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.645353 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.661182 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.674402 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.825372 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.838688 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.845334 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 22 05:23:10 crc kubenswrapper[4814]: I0122 05:23:10.911092 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.072794 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.074983 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.107015 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.204005 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.252988 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.304456 4814 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.342241 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.447282 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.523962 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.568109 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.592348 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.673051 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.790028 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.798193 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.839206 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.879260 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.890590 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.962916 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.968387 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 22 05:23:11 crc kubenswrapper[4814]: I0122 05:23:11.982004 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.057729 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.131878 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.212305 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.270290 4814 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.349260 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.422754 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.431116 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.496815 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.526464 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.544181 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.613847 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.630729 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.647391 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.677032 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.677145 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.780844 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.839333 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 22 05:23:12 crc kubenswrapper[4814]: I0122 05:23:12.887382 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.014532 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.021703 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.049806 4814 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.050130 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" 
containerID="cri-o://1af7c131b7081fefdd54ac0e5eb0e197160388f7f56944c749102875e34f02ea" gracePeriod=5 Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.060721 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.088693 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.122765 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.133499 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.279837 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.294795 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.343816 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.364193 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.470909 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.475922 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.611074 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.641129 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.728071 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.790994 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.964366 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 22 05:23:13 crc kubenswrapper[4814]: I0122 05:23:13.990955 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 22 05:23:14 crc kubenswrapper[4814]: I0122 05:23:14.159351 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 22 05:23:14 crc kubenswrapper[4814]: I0122 05:23:14.160942 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 22 05:23:14 crc kubenswrapper[4814]: I0122 05:23:14.317078 4814 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 22 05:23:14 crc kubenswrapper[4814]: I0122 05:23:14.384893 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 22 05:23:14 crc kubenswrapper[4814]: I0122 05:23:14.448858 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 22 05:23:14 crc kubenswrapper[4814]: I0122 05:23:14.454972 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 22 05:23:14 crc kubenswrapper[4814]: I0122 05:23:14.742317 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 22 05:23:14 crc kubenswrapper[4814]: I0122 05:23:14.781684 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 22 05:23:14 crc kubenswrapper[4814]: I0122 05:23:14.783335 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 22 05:23:14 crc kubenswrapper[4814]: I0122 05:23:14.886550 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 22 05:23:14 crc kubenswrapper[4814]: I0122 05:23:14.898398 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 22 05:23:15 crc kubenswrapper[4814]: I0122 05:23:15.013835 4814 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 22 05:23:15 crc kubenswrapper[4814]: I0122 05:23:15.090486 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 22 05:23:15 crc kubenswrapper[4814]: I0122 05:23:15.131548 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 22 05:23:15 crc kubenswrapper[4814]: I0122 05:23:15.182231 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 22 05:23:15 crc kubenswrapper[4814]: I0122 05:23:15.268246 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 22 05:23:15 crc kubenswrapper[4814]: I0122 05:23:15.391732 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 22 05:23:15 crc kubenswrapper[4814]: I0122 05:23:15.471152 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 22 05:23:15 crc kubenswrapper[4814]: I0122 05:23:15.589688 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 22 05:23:15 crc kubenswrapper[4814]: I0122 05:23:15.626447 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 22 05:23:15 crc kubenswrapper[4814]: I0122 05:23:15.725888 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 22 05:23:16 crc kubenswrapper[4814]: I0122 05:23:16.158700 4814 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-image-registry"/"installation-pull-secrets" Jan 22 05:23:16 crc kubenswrapper[4814]: I0122 05:23:16.203414 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 22 05:23:16 crc kubenswrapper[4814]: I0122 05:23:16.285537 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 22 05:23:16 crc kubenswrapper[4814]: I0122 05:23:16.374841 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 22 05:23:16 crc kubenswrapper[4814]: I0122 05:23:16.539141 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 22 05:23:16 crc kubenswrapper[4814]: I0122 05:23:16.833856 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.106105 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.530664 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vwp7v"] Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.531845 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vwp7v" podUID="8d371e5b-a490-441d-90c3-ead8479f81dc" containerName="registry-server" containerID="cri-o://31ecc52f4d00b83a6d5e44635b63c3a06b8eb8409566e4aadc436b78bc8f1263" gracePeriod=30 Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.555139 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b84wr"] Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.556090 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b84wr" podUID="dfc03373-04f8-49da-a3d6-5428a0324db5" containerName="registry-server" containerID="cri-o://957f349cf8e513db5222805327216b5b03e39c7313bf995666e38652f3f6df39" gracePeriod=30 Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.564726 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-g9h6j"] Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.564939 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" podUID="0b9f4ced-dcb7-458a-a111-71d67169f45b" containerName="marketplace-operator" containerID="cri-o://05b52777b6a4e9102ad9f23e75ee4370350b7b6c8e13cbaecaaf36a105b2cdbd" gracePeriod=30 Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.569327 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sl7ng"] Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.569682 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-sl7ng" podUID="33942fc4-20af-4f7f-a3db-e04a2356e2db" containerName="registry-server" containerID="cri-o://b486b4e9774bc52c306433ea1a17fb7da03ad21bc3baa9d9d8722b43d6c77cda" gracePeriod=30 Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.576025 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t7mps"] Jan 
22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.576328 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-t7mps" podUID="24689da3-97aa-4d34-ad33-4fdb8950e6a9" containerName="registry-server" containerID="cri-o://b6476662b42d2aa1deaac1d3981a34abb178d5a506741daf5b2f2765ff8e5081" gracePeriod=30 Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.597610 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8bh56"] Jan 22 05:23:17 crc kubenswrapper[4814]: E0122 05:23:17.599199 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" containerName="installer" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.599224 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" containerName="installer" Jan 22 05:23:17 crc kubenswrapper[4814]: E0122 05:23:17.599244 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.599252 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.599363 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.599377 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a84f2d9-eeed-4106-8d46-11e6e0bf5f71" containerName="installer" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.602681 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.610938 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8bh56"] Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.742475 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghgr4\" (UniqueName: \"kubernetes.io/projected/faa29fdf-b67e-47d7-bd8d-35612571106a-kube-api-access-ghgr4\") pod \"marketplace-operator-79b997595-8bh56\" (UID: \"faa29fdf-b67e-47d7-bd8d-35612571106a\") " pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.742547 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/faa29fdf-b67e-47d7-bd8d-35612571106a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-8bh56\" (UID: \"faa29fdf-b67e-47d7-bd8d-35612571106a\") " pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.742583 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/faa29fdf-b67e-47d7-bd8d-35612571106a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-8bh56\" (UID: \"faa29fdf-b67e-47d7-bd8d-35612571106a\") " pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.844253 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghgr4\" (UniqueName: \"kubernetes.io/projected/faa29fdf-b67e-47d7-bd8d-35612571106a-kube-api-access-ghgr4\") pod \"marketplace-operator-79b997595-8bh56\" (UID: \"faa29fdf-b67e-47d7-bd8d-35612571106a\") " pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.844538 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/faa29fdf-b67e-47d7-bd8d-35612571106a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-8bh56\" (UID: \"faa29fdf-b67e-47d7-bd8d-35612571106a\") " pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.844606 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/faa29fdf-b67e-47d7-bd8d-35612571106a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-8bh56\" (UID: \"faa29fdf-b67e-47d7-bd8d-35612571106a\") " pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.845921 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/faa29fdf-b67e-47d7-bd8d-35612571106a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-8bh56\" (UID: \"faa29fdf-b67e-47d7-bd8d-35612571106a\") " pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.855717 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/faa29fdf-b67e-47d7-bd8d-35612571106a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-8bh56\" (UID: \"faa29fdf-b67e-47d7-bd8d-35612571106a\") " pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.865500 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghgr4\" (UniqueName: \"kubernetes.io/projected/faa29fdf-b67e-47d7-bd8d-35612571106a-kube-api-access-ghgr4\") pod \"marketplace-operator-79b997595-8bh56\" (UID: \"faa29fdf-b67e-47d7-bd8d-35612571106a\") " pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.898019 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vwp7v" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.930889 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" Jan 22 05:23:17 crc kubenswrapper[4814]: I0122 05:23:17.970302 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b84wr" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.013666 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.016835 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.022938 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.045832 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d371e5b-a490-441d-90c3-ead8479f81dc-utilities\") pod \"8d371e5b-a490-441d-90c3-ead8479f81dc\" (UID: \"8d371e5b-a490-441d-90c3-ead8479f81dc\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.045880 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfc03373-04f8-49da-a3d6-5428a0324db5-utilities\") pod \"dfc03373-04f8-49da-a3d6-5428a0324db5\" (UID: \"dfc03373-04f8-49da-a3d6-5428a0324db5\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.045907 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9t2z\" (UniqueName: \"kubernetes.io/projected/dfc03373-04f8-49da-a3d6-5428a0324db5-kube-api-access-d9t2z\") pod \"dfc03373-04f8-49da-a3d6-5428a0324db5\" (UID: \"dfc03373-04f8-49da-a3d6-5428a0324db5\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.045955 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ps6zm\" (UniqueName: \"kubernetes.io/projected/8d371e5b-a490-441d-90c3-ead8479f81dc-kube-api-access-ps6zm\") pod \"8d371e5b-a490-441d-90c3-ead8479f81dc\" (UID: \"8d371e5b-a490-441d-90c3-ead8479f81dc\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.046014 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfc03373-04f8-49da-a3d6-5428a0324db5-catalog-content\") pod \"dfc03373-04f8-49da-a3d6-5428a0324db5\" (UID: \"dfc03373-04f8-49da-a3d6-5428a0324db5\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.046065 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d371e5b-a490-441d-90c3-ead8479f81dc-catalog-content\") pod \"8d371e5b-a490-441d-90c3-ead8479f81dc\" (UID: \"8d371e5b-a490-441d-90c3-ead8479f81dc\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.047414 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d371e5b-a490-441d-90c3-ead8479f81dc-utilities" (OuterVolumeSpecName: "utilities") pod "8d371e5b-a490-441d-90c3-ead8479f81dc" (UID: "8d371e5b-a490-441d-90c3-ead8479f81dc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.047792 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfc03373-04f8-49da-a3d6-5428a0324db5-utilities" (OuterVolumeSpecName: "utilities") pod "dfc03373-04f8-49da-a3d6-5428a0324db5" (UID: "dfc03373-04f8-49da-a3d6-5428a0324db5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.051506 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d371e5b-a490-441d-90c3-ead8479f81dc-kube-api-access-ps6zm" (OuterVolumeSpecName: "kube-api-access-ps6zm") pod "8d371e5b-a490-441d-90c3-ead8479f81dc" (UID: "8d371e5b-a490-441d-90c3-ead8479f81dc"). InnerVolumeSpecName "kube-api-access-ps6zm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.066279 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfc03373-04f8-49da-a3d6-5428a0324db5-kube-api-access-d9t2z" (OuterVolumeSpecName: "kube-api-access-d9t2z") pod "dfc03373-04f8-49da-a3d6-5428a0324db5" (UID: "dfc03373-04f8-49da-a3d6-5428a0324db5"). InnerVolumeSpecName "kube-api-access-d9t2z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.114268 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d371e5b-a490-441d-90c3-ead8479f81dc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8d371e5b-a490-441d-90c3-ead8479f81dc" (UID: "8d371e5b-a490-441d-90c3-ead8479f81dc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.138472 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfc03373-04f8-49da-a3d6-5428a0324db5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dfc03373-04f8-49da-a3d6-5428a0324db5" (UID: "dfc03373-04f8-49da-a3d6-5428a0324db5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.142201 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.142270 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147023 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8ttm\" (UniqueName: \"kubernetes.io/projected/0b9f4ced-dcb7-458a-a111-71d67169f45b-kube-api-access-r8ttm\") pod \"0b9f4ced-dcb7-458a-a111-71d67169f45b\" (UID: \"0b9f4ced-dcb7-458a-a111-71d67169f45b\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147069 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24689da3-97aa-4d34-ad33-4fdb8950e6a9-catalog-content\") pod \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\" (UID: \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147096 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlpgt\" (UniqueName: \"kubernetes.io/projected/24689da3-97aa-4d34-ad33-4fdb8950e6a9-kube-api-access-nlpgt\") pod \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\" (UID: \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147124 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33942fc4-20af-4f7f-a3db-e04a2356e2db-utilities\") pod \"33942fc4-20af-4f7f-a3db-e04a2356e2db\" (UID: \"33942fc4-20af-4f7f-a3db-e04a2356e2db\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147147 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5jsl\" (UniqueName: 
\"kubernetes.io/projected/33942fc4-20af-4f7f-a3db-e04a2356e2db-kube-api-access-v5jsl\") pod \"33942fc4-20af-4f7f-a3db-e04a2356e2db\" (UID: \"33942fc4-20af-4f7f-a3db-e04a2356e2db\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147209 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0b9f4ced-dcb7-458a-a111-71d67169f45b-marketplace-trusted-ca\") pod \"0b9f4ced-dcb7-458a-a111-71d67169f45b\" (UID: \"0b9f4ced-dcb7-458a-a111-71d67169f45b\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147238 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0b9f4ced-dcb7-458a-a111-71d67169f45b-marketplace-operator-metrics\") pod \"0b9f4ced-dcb7-458a-a111-71d67169f45b\" (UID: \"0b9f4ced-dcb7-458a-a111-71d67169f45b\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147276 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24689da3-97aa-4d34-ad33-4fdb8950e6a9-utilities\") pod \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\" (UID: \"24689da3-97aa-4d34-ad33-4fdb8950e6a9\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147299 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33942fc4-20af-4f7f-a3db-e04a2356e2db-catalog-content\") pod \"33942fc4-20af-4f7f-a3db-e04a2356e2db\" (UID: \"33942fc4-20af-4f7f-a3db-e04a2356e2db\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147480 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfc03373-04f8-49da-a3d6-5428a0324db5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147490 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d371e5b-a490-441d-90c3-ead8479f81dc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147499 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d371e5b-a490-441d-90c3-ead8479f81dc-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147507 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfc03373-04f8-49da-a3d6-5428a0324db5-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147516 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9t2z\" (UniqueName: \"kubernetes.io/projected/dfc03373-04f8-49da-a3d6-5428a0324db5-kube-api-access-d9t2z\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.147526 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ps6zm\" (UniqueName: \"kubernetes.io/projected/8d371e5b-a490-441d-90c3-ead8479f81dc-kube-api-access-ps6zm\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.148754 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33942fc4-20af-4f7f-a3db-e04a2356e2db-utilities" (OuterVolumeSpecName: "utilities") pod "33942fc4-20af-4f7f-a3db-e04a2356e2db" (UID: 
"33942fc4-20af-4f7f-a3db-e04a2356e2db"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.149232 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b9f4ced-dcb7-458a-a111-71d67169f45b-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "0b9f4ced-dcb7-458a-a111-71d67169f45b" (UID: "0b9f4ced-dcb7-458a-a111-71d67169f45b"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.149705 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24689da3-97aa-4d34-ad33-4fdb8950e6a9-utilities" (OuterVolumeSpecName: "utilities") pod "24689da3-97aa-4d34-ad33-4fdb8950e6a9" (UID: "24689da3-97aa-4d34-ad33-4fdb8950e6a9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.151093 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24689da3-97aa-4d34-ad33-4fdb8950e6a9-kube-api-access-nlpgt" (OuterVolumeSpecName: "kube-api-access-nlpgt") pod "24689da3-97aa-4d34-ad33-4fdb8950e6a9" (UID: "24689da3-97aa-4d34-ad33-4fdb8950e6a9"). InnerVolumeSpecName "kube-api-access-nlpgt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.158353 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33942fc4-20af-4f7f-a3db-e04a2356e2db-kube-api-access-v5jsl" (OuterVolumeSpecName: "kube-api-access-v5jsl") pod "33942fc4-20af-4f7f-a3db-e04a2356e2db" (UID: "33942fc4-20af-4f7f-a3db-e04a2356e2db"). InnerVolumeSpecName "kube-api-access-v5jsl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.158562 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b9f4ced-dcb7-458a-a111-71d67169f45b-kube-api-access-r8ttm" (OuterVolumeSpecName: "kube-api-access-r8ttm") pod "0b9f4ced-dcb7-458a-a111-71d67169f45b" (UID: "0b9f4ced-dcb7-458a-a111-71d67169f45b"). InnerVolumeSpecName "kube-api-access-r8ttm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.160986 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b9f4ced-dcb7-458a-a111-71d67169f45b-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "0b9f4ced-dcb7-458a-a111-71d67169f45b" (UID: "0b9f4ced-dcb7-458a-a111-71d67169f45b"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.179367 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33942fc4-20af-4f7f-a3db-e04a2356e2db-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "33942fc4-20af-4f7f-a3db-e04a2356e2db" (UID: "33942fc4-20af-4f7f-a3db-e04a2356e2db"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.245533 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-8bh56"] Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.247797 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.247838 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.247856 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.247893 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.247927 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.248148 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33942fc4-20af-4f7f-a3db-e04a2356e2db-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.248165 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8ttm\" (UniqueName: \"kubernetes.io/projected/0b9f4ced-dcb7-458a-a111-71d67169f45b-kube-api-access-r8ttm\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.248176 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlpgt\" (UniqueName: \"kubernetes.io/projected/24689da3-97aa-4d34-ad33-4fdb8950e6a9-kube-api-access-nlpgt\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.248183 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33942fc4-20af-4f7f-a3db-e04a2356e2db-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.248191 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5jsl\" (UniqueName: \"kubernetes.io/projected/33942fc4-20af-4f7f-a3db-e04a2356e2db-kube-api-access-v5jsl\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.248200 4814 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0b9f4ced-dcb7-458a-a111-71d67169f45b-marketplace-trusted-ca\") on node \"crc\" 
DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.248208 4814 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/0b9f4ced-dcb7-458a-a111-71d67169f45b-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.248216 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24689da3-97aa-4d34-ad33-4fdb8950e6a9-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.248251 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.248279 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.248295 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.248429 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.254417 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.285808 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24689da3-97aa-4d34-ad33-4fdb8950e6a9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24689da3-97aa-4d34-ad33-4fdb8950e6a9" (UID: "24689da3-97aa-4d34-ad33-4fdb8950e6a9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.356097 4814 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.356131 4814 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.356146 4814 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.356160 4814 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.356172 4814 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.356183 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24689da3-97aa-4d34-ad33-4fdb8950e6a9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.357174 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.357385 4814 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.370741 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.370774 4814 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="143666b6-db82-4266-8aa7-0bf20045364a" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.371824 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.371858 4814 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="143666b6-db82-4266-8aa7-0bf20045364a" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.475927 4814 generic.go:334] "Generic (PLEG): container finished" podID="33942fc4-20af-4f7f-a3db-e04a2356e2db" containerID="b486b4e9774bc52c306433ea1a17fb7da03ad21bc3baa9d9d8722b43d6c77cda" exitCode=0 Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.475996 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sl7ng" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.476000 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sl7ng" event={"ID":"33942fc4-20af-4f7f-a3db-e04a2356e2db","Type":"ContainerDied","Data":"b486b4e9774bc52c306433ea1a17fb7da03ad21bc3baa9d9d8722b43d6c77cda"} Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.476031 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sl7ng" event={"ID":"33942fc4-20af-4f7f-a3db-e04a2356e2db","Type":"ContainerDied","Data":"669736cf1229424b42a57a4d5f8ee45186a2df08fca736a8a373458b4c41c947"} Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.476047 4814 scope.go:117] "RemoveContainer" containerID="b486b4e9774bc52c306433ea1a17fb7da03ad21bc3baa9d9d8722b43d6c77cda" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.479040 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.479247 4814 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="1af7c131b7081fefdd54ac0e5eb0e197160388f7f56944c749102875e34f02ea" exitCode=137 Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.479296 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.482028 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" event={"ID":"faa29fdf-b67e-47d7-bd8d-35612571106a","Type":"ContainerStarted","Data":"c0ef279a58be5bd8e37474f11ad83e049ef47bf9dc2d62d67543b0ae54505600"} Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.482061 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" event={"ID":"faa29fdf-b67e-47d7-bd8d-35612571106a","Type":"ContainerStarted","Data":"d2ac99ee800120dd972a2d661f1b166126d5599fd893f448374a64278b568476"} Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.482383 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.484953 4814 generic.go:334] "Generic (PLEG): container finished" podID="dfc03373-04f8-49da-a3d6-5428a0324db5" containerID="957f349cf8e513db5222805327216b5b03e39c7313bf995666e38652f3f6df39" exitCode=0 Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.485039 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b84wr" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.485050 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b84wr" event={"ID":"dfc03373-04f8-49da-a3d6-5428a0324db5","Type":"ContainerDied","Data":"957f349cf8e513db5222805327216b5b03e39c7313bf995666e38652f3f6df39"} Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.485887 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b84wr" event={"ID":"dfc03373-04f8-49da-a3d6-5428a0324db5","Type":"ContainerDied","Data":"b4820c45a4fdab458d9335112a83cbc76229d5a7163425777666302600f73686"} Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.486172 4814 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-8bh56 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.57:8080/healthz\": dial tcp 10.217.0.57:8080: connect: connection refused" start-of-body= Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.486200 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" podUID="faa29fdf-b67e-47d7-bd8d-35612571106a" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.57:8080/healthz\": dial tcp 10.217.0.57:8080: connect: connection refused" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.489000 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sl7ng"] Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.489857 4814 generic.go:334] "Generic (PLEG): container finished" podID="24689da3-97aa-4d34-ad33-4fdb8950e6a9" containerID="b6476662b42d2aa1deaac1d3981a34abb178d5a506741daf5b2f2765ff8e5081" exitCode=0 Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.489954 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7mps" event={"ID":"24689da3-97aa-4d34-ad33-4fdb8950e6a9","Type":"ContainerDied","Data":"b6476662b42d2aa1deaac1d3981a34abb178d5a506741daf5b2f2765ff8e5081"} Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.489976 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7mps" event={"ID":"24689da3-97aa-4d34-ad33-4fdb8950e6a9","Type":"ContainerDied","Data":"178c764cc3e28a7997216fcdd21c06e8d01bc9255bc5596213a733db579e1edc"} Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.490064 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t7mps" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.494020 4814 scope.go:117] "RemoveContainer" containerID="49ae8b672ce05325c7a0eb6865543d187972b0092794b0c82234f91a71bb2595" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.495734 4814 generic.go:334] "Generic (PLEG): container finished" podID="8d371e5b-a490-441d-90c3-ead8479f81dc" containerID="31ecc52f4d00b83a6d5e44635b63c3a06b8eb8409566e4aadc436b78bc8f1263" exitCode=0 Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.495858 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwp7v" event={"ID":"8d371e5b-a490-441d-90c3-ead8479f81dc","Type":"ContainerDied","Data":"31ecc52f4d00b83a6d5e44635b63c3a06b8eb8409566e4aadc436b78bc8f1263"} Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.495935 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vwp7v" event={"ID":"8d371e5b-a490-441d-90c3-ead8479f81dc","Type":"ContainerDied","Data":"3ad88bf61ea8b8beda837318abed0b1ae109c5242e69b90ca970e7b99b4772d0"} Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.496066 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vwp7v" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.498408 4814 generic.go:334] "Generic (PLEG): container finished" podID="0b9f4ced-dcb7-458a-a111-71d67169f45b" containerID="05b52777b6a4e9102ad9f23e75ee4370350b7b6c8e13cbaecaaf36a105b2cdbd" exitCode=0 Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.498442 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" event={"ID":"0b9f4ced-dcb7-458a-a111-71d67169f45b","Type":"ContainerDied","Data":"05b52777b6a4e9102ad9f23e75ee4370350b7b6c8e13cbaecaaf36a105b2cdbd"} Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.498463 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" event={"ID":"0b9f4ced-dcb7-458a-a111-71d67169f45b","Type":"ContainerDied","Data":"1fc5735fed31aeb09fee25efe65b2893ce9de65288c8092f259b7dc07c0b6211"} Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.498514 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-g9h6j" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.503204 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sl7ng"] Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.511181 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" podStartSLOduration=1.5111630470000001 podStartE2EDuration="1.511163047s" podCreationTimestamp="2026-01-22 05:23:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:23:18.507307708 +0000 UTC m=+284.590795923" watchObservedRunningTime="2026-01-22 05:23:18.511163047 +0000 UTC m=+284.594651262" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.512609 4814 scope.go:117] "RemoveContainer" containerID="ffb067413404ef5ded47f27a0b3e4ddcf3f6a4a79962043d063818db6b8a5f15" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.526266 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t7mps"] Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.529847 4814 scope.go:117] "RemoveContainer" containerID="b486b4e9774bc52c306433ea1a17fb7da03ad21bc3baa9d9d8722b43d6c77cda" Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.531106 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b486b4e9774bc52c306433ea1a17fb7da03ad21bc3baa9d9d8722b43d6c77cda\": container with ID starting with b486b4e9774bc52c306433ea1a17fb7da03ad21bc3baa9d9d8722b43d6c77cda not found: ID does not exist" containerID="b486b4e9774bc52c306433ea1a17fb7da03ad21bc3baa9d9d8722b43d6c77cda" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.531133 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b486b4e9774bc52c306433ea1a17fb7da03ad21bc3baa9d9d8722b43d6c77cda"} err="failed to get container status \"b486b4e9774bc52c306433ea1a17fb7da03ad21bc3baa9d9d8722b43d6c77cda\": rpc error: code = NotFound desc = could not find container \"b486b4e9774bc52c306433ea1a17fb7da03ad21bc3baa9d9d8722b43d6c77cda\": container with ID starting with b486b4e9774bc52c306433ea1a17fb7da03ad21bc3baa9d9d8722b43d6c77cda not found: ID does not exist" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.531151 4814 scope.go:117] "RemoveContainer" containerID="49ae8b672ce05325c7a0eb6865543d187972b0092794b0c82234f91a71bb2595" Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.531381 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49ae8b672ce05325c7a0eb6865543d187972b0092794b0c82234f91a71bb2595\": container with ID starting with 49ae8b672ce05325c7a0eb6865543d187972b0092794b0c82234f91a71bb2595 not found: ID does not exist" containerID="49ae8b672ce05325c7a0eb6865543d187972b0092794b0c82234f91a71bb2595" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.531396 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49ae8b672ce05325c7a0eb6865543d187972b0092794b0c82234f91a71bb2595"} err="failed to get container status \"49ae8b672ce05325c7a0eb6865543d187972b0092794b0c82234f91a71bb2595\": rpc error: code = NotFound desc = could not find container \"49ae8b672ce05325c7a0eb6865543d187972b0092794b0c82234f91a71bb2595\": 
container with ID starting with 49ae8b672ce05325c7a0eb6865543d187972b0092794b0c82234f91a71bb2595 not found: ID does not exist" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.531409 4814 scope.go:117] "RemoveContainer" containerID="ffb067413404ef5ded47f27a0b3e4ddcf3f6a4a79962043d063818db6b8a5f15" Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.533085 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffb067413404ef5ded47f27a0b3e4ddcf3f6a4a79962043d063818db6b8a5f15\": container with ID starting with ffb067413404ef5ded47f27a0b3e4ddcf3f6a4a79962043d063818db6b8a5f15 not found: ID does not exist" containerID="ffb067413404ef5ded47f27a0b3e4ddcf3f6a4a79962043d063818db6b8a5f15" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.533105 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffb067413404ef5ded47f27a0b3e4ddcf3f6a4a79962043d063818db6b8a5f15"} err="failed to get container status \"ffb067413404ef5ded47f27a0b3e4ddcf3f6a4a79962043d063818db6b8a5f15\": rpc error: code = NotFound desc = could not find container \"ffb067413404ef5ded47f27a0b3e4ddcf3f6a4a79962043d063818db6b8a5f15\": container with ID starting with ffb067413404ef5ded47f27a0b3e4ddcf3f6a4a79962043d063818db6b8a5f15 not found: ID does not exist" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.533143 4814 scope.go:117] "RemoveContainer" containerID="1af7c131b7081fefdd54ac0e5eb0e197160388f7f56944c749102875e34f02ea" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.535283 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-t7mps"] Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.538360 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-g9h6j"] Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.544157 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-g9h6j"] Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.546548 4814 scope.go:117] "RemoveContainer" containerID="1af7c131b7081fefdd54ac0e5eb0e197160388f7f56944c749102875e34f02ea" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.546767 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b84wr"] Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.547118 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1af7c131b7081fefdd54ac0e5eb0e197160388f7f56944c749102875e34f02ea\": container with ID starting with 1af7c131b7081fefdd54ac0e5eb0e197160388f7f56944c749102875e34f02ea not found: ID does not exist" containerID="1af7c131b7081fefdd54ac0e5eb0e197160388f7f56944c749102875e34f02ea" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.547160 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1af7c131b7081fefdd54ac0e5eb0e197160388f7f56944c749102875e34f02ea"} err="failed to get container status \"1af7c131b7081fefdd54ac0e5eb0e197160388f7f56944c749102875e34f02ea\": rpc error: code = NotFound desc = could not find container \"1af7c131b7081fefdd54ac0e5eb0e197160388f7f56944c749102875e34f02ea\": container with ID starting with 1af7c131b7081fefdd54ac0e5eb0e197160388f7f56944c749102875e34f02ea not found: ID does not exist" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.547200 
4814 scope.go:117] "RemoveContainer" containerID="957f349cf8e513db5222805327216b5b03e39c7313bf995666e38652f3f6df39" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.549543 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b84wr"] Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.555300 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vwp7v"] Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.558303 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vwp7v"] Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.564004 4814 scope.go:117] "RemoveContainer" containerID="99745aae137d2582e9bc2dbab8d4b8b60818ff6a2cd2e8118419c2d8a29c8d7e" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.586346 4814 scope.go:117] "RemoveContainer" containerID="86bccf11918c25bf075a1ad7a8d567befe131a613acf146db0d6a4ff70194961" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.599680 4814 scope.go:117] "RemoveContainer" containerID="957f349cf8e513db5222805327216b5b03e39c7313bf995666e38652f3f6df39" Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.600189 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"957f349cf8e513db5222805327216b5b03e39c7313bf995666e38652f3f6df39\": container with ID starting with 957f349cf8e513db5222805327216b5b03e39c7313bf995666e38652f3f6df39 not found: ID does not exist" containerID="957f349cf8e513db5222805327216b5b03e39c7313bf995666e38652f3f6df39" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.600242 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"957f349cf8e513db5222805327216b5b03e39c7313bf995666e38652f3f6df39"} err="failed to get container status \"957f349cf8e513db5222805327216b5b03e39c7313bf995666e38652f3f6df39\": rpc error: code = NotFound desc = could not find container \"957f349cf8e513db5222805327216b5b03e39c7313bf995666e38652f3f6df39\": container with ID starting with 957f349cf8e513db5222805327216b5b03e39c7313bf995666e38652f3f6df39 not found: ID does not exist" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.600281 4814 scope.go:117] "RemoveContainer" containerID="99745aae137d2582e9bc2dbab8d4b8b60818ff6a2cd2e8118419c2d8a29c8d7e" Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.600582 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99745aae137d2582e9bc2dbab8d4b8b60818ff6a2cd2e8118419c2d8a29c8d7e\": container with ID starting with 99745aae137d2582e9bc2dbab8d4b8b60818ff6a2cd2e8118419c2d8a29c8d7e not found: ID does not exist" containerID="99745aae137d2582e9bc2dbab8d4b8b60818ff6a2cd2e8118419c2d8a29c8d7e" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.600613 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99745aae137d2582e9bc2dbab8d4b8b60818ff6a2cd2e8118419c2d8a29c8d7e"} err="failed to get container status \"99745aae137d2582e9bc2dbab8d4b8b60818ff6a2cd2e8118419c2d8a29c8d7e\": rpc error: code = NotFound desc = could not find container \"99745aae137d2582e9bc2dbab8d4b8b60818ff6a2cd2e8118419c2d8a29c8d7e\": container with ID starting with 99745aae137d2582e9bc2dbab8d4b8b60818ff6a2cd2e8118419c2d8a29c8d7e not found: ID does not exist" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.600680 4814 scope.go:117] 
"RemoveContainer" containerID="86bccf11918c25bf075a1ad7a8d567befe131a613acf146db0d6a4ff70194961" Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.600913 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86bccf11918c25bf075a1ad7a8d567befe131a613acf146db0d6a4ff70194961\": container with ID starting with 86bccf11918c25bf075a1ad7a8d567befe131a613acf146db0d6a4ff70194961 not found: ID does not exist" containerID="86bccf11918c25bf075a1ad7a8d567befe131a613acf146db0d6a4ff70194961" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.600939 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86bccf11918c25bf075a1ad7a8d567befe131a613acf146db0d6a4ff70194961"} err="failed to get container status \"86bccf11918c25bf075a1ad7a8d567befe131a613acf146db0d6a4ff70194961\": rpc error: code = NotFound desc = could not find container \"86bccf11918c25bf075a1ad7a8d567befe131a613acf146db0d6a4ff70194961\": container with ID starting with 86bccf11918c25bf075a1ad7a8d567befe131a613acf146db0d6a4ff70194961 not found: ID does not exist" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.600955 4814 scope.go:117] "RemoveContainer" containerID="b6476662b42d2aa1deaac1d3981a34abb178d5a506741daf5b2f2765ff8e5081" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.613207 4814 scope.go:117] "RemoveContainer" containerID="0cdb1028fa8e50ce528b6883fe5ac80c36f8b0070acda290ffb68e4b6c3f80bb" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.626477 4814 scope.go:117] "RemoveContainer" containerID="075d4a694168d5c5df684332d65c779f62c421a45a1c76cc22266f2d9e9447f8" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.637907 4814 scope.go:117] "RemoveContainer" containerID="b6476662b42d2aa1deaac1d3981a34abb178d5a506741daf5b2f2765ff8e5081" Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.638297 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6476662b42d2aa1deaac1d3981a34abb178d5a506741daf5b2f2765ff8e5081\": container with ID starting with b6476662b42d2aa1deaac1d3981a34abb178d5a506741daf5b2f2765ff8e5081 not found: ID does not exist" containerID="b6476662b42d2aa1deaac1d3981a34abb178d5a506741daf5b2f2765ff8e5081" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.638346 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6476662b42d2aa1deaac1d3981a34abb178d5a506741daf5b2f2765ff8e5081"} err="failed to get container status \"b6476662b42d2aa1deaac1d3981a34abb178d5a506741daf5b2f2765ff8e5081\": rpc error: code = NotFound desc = could not find container \"b6476662b42d2aa1deaac1d3981a34abb178d5a506741daf5b2f2765ff8e5081\": container with ID starting with b6476662b42d2aa1deaac1d3981a34abb178d5a506741daf5b2f2765ff8e5081 not found: ID does not exist" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.638377 4814 scope.go:117] "RemoveContainer" containerID="0cdb1028fa8e50ce528b6883fe5ac80c36f8b0070acda290ffb68e4b6c3f80bb" Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.638720 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0cdb1028fa8e50ce528b6883fe5ac80c36f8b0070acda290ffb68e4b6c3f80bb\": container with ID starting with 0cdb1028fa8e50ce528b6883fe5ac80c36f8b0070acda290ffb68e4b6c3f80bb not found: ID does not exist" 
containerID="0cdb1028fa8e50ce528b6883fe5ac80c36f8b0070acda290ffb68e4b6c3f80bb" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.638747 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0cdb1028fa8e50ce528b6883fe5ac80c36f8b0070acda290ffb68e4b6c3f80bb"} err="failed to get container status \"0cdb1028fa8e50ce528b6883fe5ac80c36f8b0070acda290ffb68e4b6c3f80bb\": rpc error: code = NotFound desc = could not find container \"0cdb1028fa8e50ce528b6883fe5ac80c36f8b0070acda290ffb68e4b6c3f80bb\": container with ID starting with 0cdb1028fa8e50ce528b6883fe5ac80c36f8b0070acda290ffb68e4b6c3f80bb not found: ID does not exist" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.638767 4814 scope.go:117] "RemoveContainer" containerID="075d4a694168d5c5df684332d65c779f62c421a45a1c76cc22266f2d9e9447f8" Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.638984 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"075d4a694168d5c5df684332d65c779f62c421a45a1c76cc22266f2d9e9447f8\": container with ID starting with 075d4a694168d5c5df684332d65c779f62c421a45a1c76cc22266f2d9e9447f8 not found: ID does not exist" containerID="075d4a694168d5c5df684332d65c779f62c421a45a1c76cc22266f2d9e9447f8" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.639010 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"075d4a694168d5c5df684332d65c779f62c421a45a1c76cc22266f2d9e9447f8"} err="failed to get container status \"075d4a694168d5c5df684332d65c779f62c421a45a1c76cc22266f2d9e9447f8\": rpc error: code = NotFound desc = could not find container \"075d4a694168d5c5df684332d65c779f62c421a45a1c76cc22266f2d9e9447f8\": container with ID starting with 075d4a694168d5c5df684332d65c779f62c421a45a1c76cc22266f2d9e9447f8 not found: ID does not exist" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.639025 4814 scope.go:117] "RemoveContainer" containerID="31ecc52f4d00b83a6d5e44635b63c3a06b8eb8409566e4aadc436b78bc8f1263" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.654370 4814 scope.go:117] "RemoveContainer" containerID="96b0259b9df6d429fe09a94006389caa8efb2566adc79cb3c700eae719d6514d" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.677096 4814 scope.go:117] "RemoveContainer" containerID="05da5e92287006c251e624dc3c57aba82ffac19ebcac2e0f07b6532c61308b33" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.732274 4814 scope.go:117] "RemoveContainer" containerID="31ecc52f4d00b83a6d5e44635b63c3a06b8eb8409566e4aadc436b78bc8f1263" Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.732695 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31ecc52f4d00b83a6d5e44635b63c3a06b8eb8409566e4aadc436b78bc8f1263\": container with ID starting with 31ecc52f4d00b83a6d5e44635b63c3a06b8eb8409566e4aadc436b78bc8f1263 not found: ID does not exist" containerID="31ecc52f4d00b83a6d5e44635b63c3a06b8eb8409566e4aadc436b78bc8f1263" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.732722 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31ecc52f4d00b83a6d5e44635b63c3a06b8eb8409566e4aadc436b78bc8f1263"} err="failed to get container status \"31ecc52f4d00b83a6d5e44635b63c3a06b8eb8409566e4aadc436b78bc8f1263\": rpc error: code = NotFound desc = could not find container 
\"31ecc52f4d00b83a6d5e44635b63c3a06b8eb8409566e4aadc436b78bc8f1263\": container with ID starting with 31ecc52f4d00b83a6d5e44635b63c3a06b8eb8409566e4aadc436b78bc8f1263 not found: ID does not exist" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.732745 4814 scope.go:117] "RemoveContainer" containerID="96b0259b9df6d429fe09a94006389caa8efb2566adc79cb3c700eae719d6514d" Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.734220 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96b0259b9df6d429fe09a94006389caa8efb2566adc79cb3c700eae719d6514d\": container with ID starting with 96b0259b9df6d429fe09a94006389caa8efb2566adc79cb3c700eae719d6514d not found: ID does not exist" containerID="96b0259b9df6d429fe09a94006389caa8efb2566adc79cb3c700eae719d6514d" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.734254 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96b0259b9df6d429fe09a94006389caa8efb2566adc79cb3c700eae719d6514d"} err="failed to get container status \"96b0259b9df6d429fe09a94006389caa8efb2566adc79cb3c700eae719d6514d\": rpc error: code = NotFound desc = could not find container \"96b0259b9df6d429fe09a94006389caa8efb2566adc79cb3c700eae719d6514d\": container with ID starting with 96b0259b9df6d429fe09a94006389caa8efb2566adc79cb3c700eae719d6514d not found: ID does not exist" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.734281 4814 scope.go:117] "RemoveContainer" containerID="05da5e92287006c251e624dc3c57aba82ffac19ebcac2e0f07b6532c61308b33" Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.735034 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05da5e92287006c251e624dc3c57aba82ffac19ebcac2e0f07b6532c61308b33\": container with ID starting with 05da5e92287006c251e624dc3c57aba82ffac19ebcac2e0f07b6532c61308b33 not found: ID does not exist" containerID="05da5e92287006c251e624dc3c57aba82ffac19ebcac2e0f07b6532c61308b33" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.735060 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05da5e92287006c251e624dc3c57aba82ffac19ebcac2e0f07b6532c61308b33"} err="failed to get container status \"05da5e92287006c251e624dc3c57aba82ffac19ebcac2e0f07b6532c61308b33\": rpc error: code = NotFound desc = could not find container \"05da5e92287006c251e624dc3c57aba82ffac19ebcac2e0f07b6532c61308b33\": container with ID starting with 05da5e92287006c251e624dc3c57aba82ffac19ebcac2e0f07b6532c61308b33 not found: ID does not exist" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.735078 4814 scope.go:117] "RemoveContainer" containerID="05b52777b6a4e9102ad9f23e75ee4370350b7b6c8e13cbaecaaf36a105b2cdbd" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 05:23:18.751711 4814 scope.go:117] "RemoveContainer" containerID="05b52777b6a4e9102ad9f23e75ee4370350b7b6c8e13cbaecaaf36a105b2cdbd" Jan 22 05:23:18 crc kubenswrapper[4814]: E0122 05:23:18.752142 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05b52777b6a4e9102ad9f23e75ee4370350b7b6c8e13cbaecaaf36a105b2cdbd\": container with ID starting with 05b52777b6a4e9102ad9f23e75ee4370350b7b6c8e13cbaecaaf36a105b2cdbd not found: ID does not exist" containerID="05b52777b6a4e9102ad9f23e75ee4370350b7b6c8e13cbaecaaf36a105b2cdbd" Jan 22 05:23:18 crc kubenswrapper[4814]: I0122 
05:23:18.752168 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05b52777b6a4e9102ad9f23e75ee4370350b7b6c8e13cbaecaaf36a105b2cdbd"} err="failed to get container status \"05b52777b6a4e9102ad9f23e75ee4370350b7b6c8e13cbaecaaf36a105b2cdbd\": rpc error: code = NotFound desc = could not find container \"05b52777b6a4e9102ad9f23e75ee4370350b7b6c8e13cbaecaaf36a105b2cdbd\": container with ID starting with 05b52777b6a4e9102ad9f23e75ee4370350b7b6c8e13cbaecaaf36a105b2cdbd not found: ID does not exist" Jan 22 05:23:19 crc kubenswrapper[4814]: I0122 05:23:19.520124 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-8bh56" Jan 22 05:23:20 crc kubenswrapper[4814]: I0122 05:23:20.359311 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b9f4ced-dcb7-458a-a111-71d67169f45b" path="/var/lib/kubelet/pods/0b9f4ced-dcb7-458a-a111-71d67169f45b/volumes" Jan 22 05:23:20 crc kubenswrapper[4814]: I0122 05:23:20.361098 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24689da3-97aa-4d34-ad33-4fdb8950e6a9" path="/var/lib/kubelet/pods/24689da3-97aa-4d34-ad33-4fdb8950e6a9/volumes" Jan 22 05:23:20 crc kubenswrapper[4814]: I0122 05:23:20.362450 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33942fc4-20af-4f7f-a3db-e04a2356e2db" path="/var/lib/kubelet/pods/33942fc4-20af-4f7f-a3db-e04a2356e2db/volumes" Jan 22 05:23:20 crc kubenswrapper[4814]: I0122 05:23:20.364757 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d371e5b-a490-441d-90c3-ead8479f81dc" path="/var/lib/kubelet/pods/8d371e5b-a490-441d-90c3-ead8479f81dc/volumes" Jan 22 05:23:20 crc kubenswrapper[4814]: I0122 05:23:20.366263 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfc03373-04f8-49da-a3d6-5428a0324db5" path="/var/lib/kubelet/pods/dfc03373-04f8-49da-a3d6-5428a0324db5/volumes" Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.548774 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ggbzf"] Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.549587 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" podUID="5388d463-7ff7-4465-ab69-3d0015d91232" containerName="controller-manager" containerID="cri-o://04596947484eb17872d09bb3d8e1fa190df2907c4ca7e03c7d54a5794b3976b4" gracePeriod=30 Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.648499 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7"] Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.648715 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" podUID="47d42067-0194-4d5d-8cc8-a49e9065bc9b" containerName="route-controller-manager" containerID="cri-o://ff2d2b1ee73c07734bb3c7220e53fc5b7ced27f473092576b3b037b163145e9f" gracePeriod=30 Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.944345 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.979158 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mxcw5\" (UniqueName: \"kubernetes.io/projected/5388d463-7ff7-4465-ab69-3d0015d91232-kube-api-access-mxcw5\") pod \"5388d463-7ff7-4465-ab69-3d0015d91232\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.979206 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-client-ca\") pod \"5388d463-7ff7-4465-ab69-3d0015d91232\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.979283 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-config\") pod \"5388d463-7ff7-4465-ab69-3d0015d91232\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.979373 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-proxy-ca-bundles\") pod \"5388d463-7ff7-4465-ab69-3d0015d91232\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.980133 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-client-ca" (OuterVolumeSpecName: "client-ca") pod "5388d463-7ff7-4465-ab69-3d0015d91232" (UID: "5388d463-7ff7-4465-ab69-3d0015d91232"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.980177 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-config" (OuterVolumeSpecName: "config") pod "5388d463-7ff7-4465-ab69-3d0015d91232" (UID: "5388d463-7ff7-4465-ab69-3d0015d91232"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.980216 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "5388d463-7ff7-4465-ab69-3d0015d91232" (UID: "5388d463-7ff7-4465-ab69-3d0015d91232"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.985484 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5388d463-7ff7-4465-ab69-3d0015d91232-kube-api-access-mxcw5" (OuterVolumeSpecName: "kube-api-access-mxcw5") pod "5388d463-7ff7-4465-ab69-3d0015d91232" (UID: "5388d463-7ff7-4465-ab69-3d0015d91232"). InnerVolumeSpecName "kube-api-access-mxcw5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:23:33 crc kubenswrapper[4814]: I0122 05:23:33.995158 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.080478 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5388d463-7ff7-4465-ab69-3d0015d91232-serving-cert\") pod \"5388d463-7ff7-4465-ab69-3d0015d91232\" (UID: \"5388d463-7ff7-4465-ab69-3d0015d91232\") " Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.080810 4814 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.080864 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mxcw5\" (UniqueName: \"kubernetes.io/projected/5388d463-7ff7-4465-ab69-3d0015d91232-kube-api-access-mxcw5\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.080890 4814 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.080920 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5388d463-7ff7-4465-ab69-3d0015d91232-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.083581 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5388d463-7ff7-4465-ab69-3d0015d91232-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5388d463-7ff7-4465-ab69-3d0015d91232" (UID: "5388d463-7ff7-4465-ab69-3d0015d91232"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.168429 4814 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.182185 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5kgsm\" (UniqueName: \"kubernetes.io/projected/47d42067-0194-4d5d-8cc8-a49e9065bc9b-kube-api-access-5kgsm\") pod \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.182256 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47d42067-0194-4d5d-8cc8-a49e9065bc9b-config\") pod \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.182300 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/47d42067-0194-4d5d-8cc8-a49e9065bc9b-client-ca\") pod \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.182348 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/47d42067-0194-4d5d-8cc8-a49e9065bc9b-serving-cert\") pod \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\" (UID: \"47d42067-0194-4d5d-8cc8-a49e9065bc9b\") " Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.182586 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5388d463-7ff7-4465-ab69-3d0015d91232-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.183468 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47d42067-0194-4d5d-8cc8-a49e9065bc9b-client-ca" (OuterVolumeSpecName: "client-ca") pod "47d42067-0194-4d5d-8cc8-a49e9065bc9b" (UID: "47d42067-0194-4d5d-8cc8-a49e9065bc9b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.183559 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47d42067-0194-4d5d-8cc8-a49e9065bc9b-config" (OuterVolumeSpecName: "config") pod "47d42067-0194-4d5d-8cc8-a49e9065bc9b" (UID: "47d42067-0194-4d5d-8cc8-a49e9065bc9b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.185893 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47d42067-0194-4d5d-8cc8-a49e9065bc9b-kube-api-access-5kgsm" (OuterVolumeSpecName: "kube-api-access-5kgsm") pod "47d42067-0194-4d5d-8cc8-a49e9065bc9b" (UID: "47d42067-0194-4d5d-8cc8-a49e9065bc9b"). InnerVolumeSpecName "kube-api-access-5kgsm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.187023 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47d42067-0194-4d5d-8cc8-a49e9065bc9b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "47d42067-0194-4d5d-8cc8-a49e9065bc9b" (UID: "47d42067-0194-4d5d-8cc8-a49e9065bc9b"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.283791 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5kgsm\" (UniqueName: \"kubernetes.io/projected/47d42067-0194-4d5d-8cc8-a49e9065bc9b-kube-api-access-5kgsm\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.283843 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47d42067-0194-4d5d-8cc8-a49e9065bc9b-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.283863 4814 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/47d42067-0194-4d5d-8cc8-a49e9065bc9b-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.283880 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/47d42067-0194-4d5d-8cc8-a49e9065bc9b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.587196 4814 generic.go:334] "Generic (PLEG): container finished" podID="5388d463-7ff7-4465-ab69-3d0015d91232" containerID="04596947484eb17872d09bb3d8e1fa190df2907c4ca7e03c7d54a5794b3976b4" exitCode=0 Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.587284 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" event={"ID":"5388d463-7ff7-4465-ab69-3d0015d91232","Type":"ContainerDied","Data":"04596947484eb17872d09bb3d8e1fa190df2907c4ca7e03c7d54a5794b3976b4"} Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.587314 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" event={"ID":"5388d463-7ff7-4465-ab69-3d0015d91232","Type":"ContainerDied","Data":"21765e9e16c3f4eb1fb2fa73249e947d958f9b0addc4c6a43acb35ece115db9f"} Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.587310 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-ggbzf" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.587852 4814 scope.go:117] "RemoveContainer" containerID="04596947484eb17872d09bb3d8e1fa190df2907c4ca7e03c7d54a5794b3976b4" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.591937 4814 generic.go:334] "Generic (PLEG): container finished" podID="47d42067-0194-4d5d-8cc8-a49e9065bc9b" containerID="ff2d2b1ee73c07734bb3c7220e53fc5b7ced27f473092576b3b037b163145e9f" exitCode=0 Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.591970 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" event={"ID":"47d42067-0194-4d5d-8cc8-a49e9065bc9b","Type":"ContainerDied","Data":"ff2d2b1ee73c07734bb3c7220e53fc5b7ced27f473092576b3b037b163145e9f"} Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.591985 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" event={"ID":"47d42067-0194-4d5d-8cc8-a49e9065bc9b","Type":"ContainerDied","Data":"653f4c843b776e66c52ddd36b71975c110ff391a9b2aa42ab7844d924008604c"} Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.591985 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.612842 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ggbzf"] Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.618942 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ggbzf"] Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.621096 4814 scope.go:117] "RemoveContainer" containerID="04596947484eb17872d09bb3d8e1fa190df2907c4ca7e03c7d54a5794b3976b4" Jan 22 05:23:34 crc kubenswrapper[4814]: E0122 05:23:34.621544 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04596947484eb17872d09bb3d8e1fa190df2907c4ca7e03c7d54a5794b3976b4\": container with ID starting with 04596947484eb17872d09bb3d8e1fa190df2907c4ca7e03c7d54a5794b3976b4 not found: ID does not exist" containerID="04596947484eb17872d09bb3d8e1fa190df2907c4ca7e03c7d54a5794b3976b4" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.621587 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04596947484eb17872d09bb3d8e1fa190df2907c4ca7e03c7d54a5794b3976b4"} err="failed to get container status \"04596947484eb17872d09bb3d8e1fa190df2907c4ca7e03c7d54a5794b3976b4\": rpc error: code = NotFound desc = could not find container \"04596947484eb17872d09bb3d8e1fa190df2907c4ca7e03c7d54a5794b3976b4\": container with ID starting with 04596947484eb17872d09bb3d8e1fa190df2907c4ca7e03c7d54a5794b3976b4 not found: ID does not exist" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.621614 4814 scope.go:117] "RemoveContainer" containerID="ff2d2b1ee73c07734bb3c7220e53fc5b7ced27f473092576b3b037b163145e9f" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.623181 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7"] Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.626874 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-c9hn7"] Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.631940 4814 scope.go:117] "RemoveContainer" containerID="ff2d2b1ee73c07734bb3c7220e53fc5b7ced27f473092576b3b037b163145e9f" Jan 22 05:23:34 crc kubenswrapper[4814]: E0122 05:23:34.633955 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff2d2b1ee73c07734bb3c7220e53fc5b7ced27f473092576b3b037b163145e9f\": container with ID starting with ff2d2b1ee73c07734bb3c7220e53fc5b7ced27f473092576b3b037b163145e9f not found: ID does not exist" containerID="ff2d2b1ee73c07734bb3c7220e53fc5b7ced27f473092576b3b037b163145e9f" Jan 22 05:23:34 crc kubenswrapper[4814]: I0122 05:23:34.634356 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff2d2b1ee73c07734bb3c7220e53fc5b7ced27f473092576b3b037b163145e9f"} err="failed to get container status \"ff2d2b1ee73c07734bb3c7220e53fc5b7ced27f473092576b3b037b163145e9f\": rpc error: code = NotFound desc = could not find container \"ff2d2b1ee73c07734bb3c7220e53fc5b7ced27f473092576b3b037b163145e9f\": container with ID starting with ff2d2b1ee73c07734bb3c7220e53fc5b7ced27f473092576b3b037b163145e9f not found: ID does not exist" Jan 22 
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.439524 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq"]
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.439883 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfc03373-04f8-49da-a3d6-5428a0324db5" containerName="extract-utilities"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.439904 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfc03373-04f8-49da-a3d6-5428a0324db5" containerName="extract-utilities"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.439921 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24689da3-97aa-4d34-ad33-4fdb8950e6a9" containerName="extract-content"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.439934 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="24689da3-97aa-4d34-ad33-4fdb8950e6a9" containerName="extract-content"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.439947 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5388d463-7ff7-4465-ab69-3d0015d91232" containerName="controller-manager"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.439960 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="5388d463-7ff7-4465-ab69-3d0015d91232" containerName="controller-manager"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.439974 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d371e5b-a490-441d-90c3-ead8479f81dc" containerName="extract-utilities"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.439986 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d371e5b-a490-441d-90c3-ead8479f81dc" containerName="extract-utilities"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.440005 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33942fc4-20af-4f7f-a3db-e04a2356e2db" containerName="extract-utilities"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440017 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="33942fc4-20af-4f7f-a3db-e04a2356e2db" containerName="extract-utilities"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.440036 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfc03373-04f8-49da-a3d6-5428a0324db5" containerName="registry-server"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440049 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfc03373-04f8-49da-a3d6-5428a0324db5" containerName="registry-server"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.440070 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfc03373-04f8-49da-a3d6-5428a0324db5" containerName="extract-content"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440082 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfc03373-04f8-49da-a3d6-5428a0324db5" containerName="extract-content"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.440100 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24689da3-97aa-4d34-ad33-4fdb8950e6a9" containerName="registry-server"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440113 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="24689da3-97aa-4d34-ad33-4fdb8950e6a9" containerName="registry-server"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.440132 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33942fc4-20af-4f7f-a3db-e04a2356e2db" containerName="extract-content"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440144 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="33942fc4-20af-4f7f-a3db-e04a2356e2db" containerName="extract-content"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.440158 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b9f4ced-dcb7-458a-a111-71d67169f45b" containerName="marketplace-operator"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440170 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b9f4ced-dcb7-458a-a111-71d67169f45b" containerName="marketplace-operator"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.440188 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33942fc4-20af-4f7f-a3db-e04a2356e2db" containerName="registry-server"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440200 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="33942fc4-20af-4f7f-a3db-e04a2356e2db" containerName="registry-server"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.440215 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d371e5b-a490-441d-90c3-ead8479f81dc" containerName="extract-content"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440228 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d371e5b-a490-441d-90c3-ead8479f81dc" containerName="extract-content"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.440248 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d371e5b-a490-441d-90c3-ead8479f81dc" containerName="registry-server"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440259 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d371e5b-a490-441d-90c3-ead8479f81dc" containerName="registry-server"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.440275 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47d42067-0194-4d5d-8cc8-a49e9065bc9b" containerName="route-controller-manager"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440289 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="47d42067-0194-4d5d-8cc8-a49e9065bc9b" containerName="route-controller-manager"
Jan 22 05:23:35 crc kubenswrapper[4814]: E0122 05:23:35.440310 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24689da3-97aa-4d34-ad33-4fdb8950e6a9" containerName="extract-utilities"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440323 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="24689da3-97aa-4d34-ad33-4fdb8950e6a9" containerName="extract-utilities"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440501 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfc03373-04f8-49da-a3d6-5428a0324db5" containerName="registry-server"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440519 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="5388d463-7ff7-4465-ab69-3d0015d91232" containerName="controller-manager"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440537 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="47d42067-0194-4d5d-8cc8-a49e9065bc9b" containerName="route-controller-manager"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440557 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d371e5b-a490-441d-90c3-ead8479f81dc" containerName="registry-server"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440575 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b9f4ced-dcb7-458a-a111-71d67169f45b" containerName="marketplace-operator"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440593 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="33942fc4-20af-4f7f-a3db-e04a2356e2db" containerName="registry-server"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.440612 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="24689da3-97aa-4d34-ad33-4fdb8950e6a9" containerName="registry-server"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.441220 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.443521 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.443664 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.444059 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.444987 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.447536 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.447848 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.452410 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-74586866d5-wwnd7"]
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.453030 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.465481 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.465663 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.465972 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.466152 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.466447 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.466552 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.468590 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-74586866d5-wwnd7"]
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.474718 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 22 05:23:35 crc kubenswrapper[4814]: I0122 05:23:35.502302 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq"]
Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.761340 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-proxy-ca-bundles\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7"
Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.761398 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/746a070b-3e04-4b4a-8e40-87a2cbc593ab-serving-cert\") pod \"route-controller-manager-678cc6964f-6fhdq\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq"
Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.761435 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/746a070b-3e04-4b4a-8e40-87a2cbc593ab-config\") pod \"route-controller-manager-678cc6964f-6fhdq\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq"
Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.761520 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/746a070b-3e04-4b4a-8e40-87a2cbc593ab-client-ca\") pod \"route-controller-manager-678cc6964f-6fhdq\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq"
pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.761557 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-serving-cert\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.761624 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ks8fn\" (UniqueName: \"kubernetes.io/projected/746a070b-3e04-4b4a-8e40-87a2cbc593ab-kube-api-access-ks8fn\") pod \"route-controller-manager-678cc6964f-6fhdq\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.761684 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlt8m\" (UniqueName: \"kubernetes.io/projected/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-kube-api-access-qlt8m\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.761758 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-config\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.761852 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-client-ca\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.776972 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47d42067-0194-4d5d-8cc8-a49e9065bc9b" path="/var/lib/kubelet/pods/47d42067-0194-4d5d-8cc8-a49e9065bc9b/volumes" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.778040 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5388d463-7ff7-4465-ab69-3d0015d91232" path="/var/lib/kubelet/pods/5388d463-7ff7-4465-ab69-3d0015d91232/volumes" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.863318 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-client-ca\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.863409 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-proxy-ca-bundles\") pod \"controller-manager-74586866d5-wwnd7\" (UID: 
\"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.863447 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/746a070b-3e04-4b4a-8e40-87a2cbc593ab-serving-cert\") pod \"route-controller-manager-678cc6964f-6fhdq\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.863485 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/746a070b-3e04-4b4a-8e40-87a2cbc593ab-config\") pod \"route-controller-manager-678cc6964f-6fhdq\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.863532 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/746a070b-3e04-4b4a-8e40-87a2cbc593ab-client-ca\") pod \"route-controller-manager-678cc6964f-6fhdq\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.863567 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-serving-cert\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.863612 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ks8fn\" (UniqueName: \"kubernetes.io/projected/746a070b-3e04-4b4a-8e40-87a2cbc593ab-kube-api-access-ks8fn\") pod \"route-controller-manager-678cc6964f-6fhdq\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.863676 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlt8m\" (UniqueName: \"kubernetes.io/projected/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-kube-api-access-qlt8m\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.863729 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-config\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.866055 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-config\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: 
I0122 05:23:36.867261 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/746a070b-3e04-4b4a-8e40-87a2cbc593ab-client-ca\") pod \"route-controller-manager-678cc6964f-6fhdq\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.868338 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-client-ca\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.869497 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-proxy-ca-bundles\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.870193 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/746a070b-3e04-4b4a-8e40-87a2cbc593ab-config\") pod \"route-controller-manager-678cc6964f-6fhdq\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.876302 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-serving-cert\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.876662 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/746a070b-3e04-4b4a-8e40-87a2cbc593ab-serving-cert\") pod \"route-controller-manager-678cc6964f-6fhdq\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.888167 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ks8fn\" (UniqueName: \"kubernetes.io/projected/746a070b-3e04-4b4a-8e40-87a2cbc593ab-kube-api-access-ks8fn\") pod \"route-controller-manager-678cc6964f-6fhdq\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.888496 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlt8m\" (UniqueName: \"kubernetes.io/projected/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-kube-api-access-qlt8m\") pod \"controller-manager-74586866d5-wwnd7\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.958050 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:36 crc kubenswrapper[4814]: I0122 05:23:36.980584 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:37 crc kubenswrapper[4814]: I0122 05:23:37.234705 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq"] Jan 22 05:23:37 crc kubenswrapper[4814]: W0122 05:23:37.242922 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod746a070b_3e04_4b4a_8e40_87a2cbc593ab.slice/crio-ad2b873a36781ffb1ca7d0d22c288e09b5d5812aa5144c4ff4d44b0fe7c3b1ca WatchSource:0}: Error finding container ad2b873a36781ffb1ca7d0d22c288e09b5d5812aa5144c4ff4d44b0fe7c3b1ca: Status 404 returned error can't find the container with id ad2b873a36781ffb1ca7d0d22c288e09b5d5812aa5144c4ff4d44b0fe7c3b1ca Jan 22 05:23:37 crc kubenswrapper[4814]: I0122 05:23:37.485945 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-74586866d5-wwnd7"] Jan 22 05:23:37 crc kubenswrapper[4814]: W0122 05:23:37.506004 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85ca54c7_7833_4f46_bab1_f22f4e8bb55b.slice/crio-175eacb2e7cdd6cdd64e267e2703f0ebd647cd153fba577eff755e0416a8093a WatchSource:0}: Error finding container 175eacb2e7cdd6cdd64e267e2703f0ebd647cd153fba577eff755e0416a8093a: Status 404 returned error can't find the container with id 175eacb2e7cdd6cdd64e267e2703f0ebd647cd153fba577eff755e0416a8093a Jan 22 05:23:37 crc kubenswrapper[4814]: I0122 05:23:37.803824 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" event={"ID":"746a070b-3e04-4b4a-8e40-87a2cbc593ab","Type":"ContainerStarted","Data":"27f5edd0f0fb221672b4a0f9c3616005761e69c25e8f641a459210f346de8f68"} Jan 22 05:23:37 crc kubenswrapper[4814]: I0122 05:23:37.805020 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:37 crc kubenswrapper[4814]: I0122 05:23:37.805244 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" event={"ID":"746a070b-3e04-4b4a-8e40-87a2cbc593ab","Type":"ContainerStarted","Data":"ad2b873a36781ffb1ca7d0d22c288e09b5d5812aa5144c4ff4d44b0fe7c3b1ca"} Jan 22 05:23:37 crc kubenswrapper[4814]: I0122 05:23:37.805821 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" event={"ID":"85ca54c7-7833-4f46-bab1-f22f4e8bb55b","Type":"ContainerStarted","Data":"776a67040b54092ba96da6f7ec153be6f487ab34894ba7000f8cf27b72169472"} Jan 22 05:23:37 crc kubenswrapper[4814]: I0122 05:23:37.806032 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" event={"ID":"85ca54c7-7833-4f46-bab1-f22f4e8bb55b","Type":"ContainerStarted","Data":"175eacb2e7cdd6cdd64e267e2703f0ebd647cd153fba577eff755e0416a8093a"} Jan 22 05:23:37 crc kubenswrapper[4814]: I0122 05:23:37.806154 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:37 crc kubenswrapper[4814]: I0122 05:23:37.814946 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:37 crc kubenswrapper[4814]: I0122 05:23:37.847251 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" podStartSLOduration=4.847236294 podStartE2EDuration="4.847236294s" podCreationTimestamp="2026-01-22 05:23:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:23:37.843426887 +0000 UTC m=+303.926915102" watchObservedRunningTime="2026-01-22 05:23:37.847236294 +0000 UTC m=+303.930724499" Jan 22 05:23:37 crc kubenswrapper[4814]: I0122 05:23:37.850353 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" podStartSLOduration=4.8503431 podStartE2EDuration="4.8503431s" podCreationTimestamp="2026-01-22 05:23:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:23:37.83289119 +0000 UTC m=+303.916379405" watchObservedRunningTime="2026-01-22 05:23:37.8503431 +0000 UTC m=+303.933831315" Jan 22 05:23:37 crc kubenswrapper[4814]: I0122 05:23:37.876763 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:39 crc kubenswrapper[4814]: I0122 05:23:39.659164 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq"] Jan 22 05:23:39 crc kubenswrapper[4814]: I0122 05:23:39.664146 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-74586866d5-wwnd7"] Jan 22 05:23:40 crc kubenswrapper[4814]: I0122 05:23:40.816691 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" podUID="746a070b-3e04-4b4a-8e40-87a2cbc593ab" containerName="route-controller-manager" containerID="cri-o://27f5edd0f0fb221672b4a0f9c3616005761e69c25e8f641a459210f346de8f68" gracePeriod=30 Jan 22 05:23:40 crc kubenswrapper[4814]: I0122 05:23:40.817565 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" podUID="85ca54c7-7833-4f46-bab1-f22f4e8bb55b" containerName="controller-manager" containerID="cri-o://776a67040b54092ba96da6f7ec153be6f487ab34894ba7000f8cf27b72169472" gracePeriod=30 Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.260370 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.264058 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.390525 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/746a070b-3e04-4b4a-8e40-87a2cbc593ab-client-ca\") pod \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.390752 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-proxy-ca-bundles\") pod \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.390803 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qlt8m\" (UniqueName: \"kubernetes.io/projected/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-kube-api-access-qlt8m\") pod \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.390828 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-client-ca\") pod \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.390847 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/746a070b-3e04-4b4a-8e40-87a2cbc593ab-serving-cert\") pod \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.391488 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-client-ca" (OuterVolumeSpecName: "client-ca") pod "85ca54c7-7833-4f46-bab1-f22f4e8bb55b" (UID: "85ca54c7-7833-4f46-bab1-f22f4e8bb55b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.391553 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-config\") pod \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.391546 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "85ca54c7-7833-4f46-bab1-f22f4e8bb55b" (UID: "85ca54c7-7833-4f46-bab1-f22f4e8bb55b"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.391607 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-serving-cert\") pod \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\" (UID: \"85ca54c7-7833-4f46-bab1-f22f4e8bb55b\") " Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.391658 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ks8fn\" (UniqueName: \"kubernetes.io/projected/746a070b-3e04-4b4a-8e40-87a2cbc593ab-kube-api-access-ks8fn\") pod \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.391677 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/746a070b-3e04-4b4a-8e40-87a2cbc593ab-config\") pod \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\" (UID: \"746a070b-3e04-4b4a-8e40-87a2cbc593ab\") " Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.391938 4814 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.391949 4814 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.391945 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/746a070b-3e04-4b4a-8e40-87a2cbc593ab-client-ca" (OuterVolumeSpecName: "client-ca") pod "746a070b-3e04-4b4a-8e40-87a2cbc593ab" (UID: "746a070b-3e04-4b4a-8e40-87a2cbc593ab"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.392054 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-config" (OuterVolumeSpecName: "config") pod "85ca54c7-7833-4f46-bab1-f22f4e8bb55b" (UID: "85ca54c7-7833-4f46-bab1-f22f4e8bb55b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.392400 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/746a070b-3e04-4b4a-8e40-87a2cbc593ab-config" (OuterVolumeSpecName: "config") pod "746a070b-3e04-4b4a-8e40-87a2cbc593ab" (UID: "746a070b-3e04-4b4a-8e40-87a2cbc593ab"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.400906 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "85ca54c7-7833-4f46-bab1-f22f4e8bb55b" (UID: "85ca54c7-7833-4f46-bab1-f22f4e8bb55b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.400972 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-kube-api-access-qlt8m" (OuterVolumeSpecName: "kube-api-access-qlt8m") pod "85ca54c7-7833-4f46-bab1-f22f4e8bb55b" (UID: "85ca54c7-7833-4f46-bab1-f22f4e8bb55b"). InnerVolumeSpecName "kube-api-access-qlt8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.400992 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/746a070b-3e04-4b4a-8e40-87a2cbc593ab-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "746a070b-3e04-4b4a-8e40-87a2cbc593ab" (UID: "746a070b-3e04-4b4a-8e40-87a2cbc593ab"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.404742 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/746a070b-3e04-4b4a-8e40-87a2cbc593ab-kube-api-access-ks8fn" (OuterVolumeSpecName: "kube-api-access-ks8fn") pod "746a070b-3e04-4b4a-8e40-87a2cbc593ab" (UID: "746a070b-3e04-4b4a-8e40-87a2cbc593ab"). InnerVolumeSpecName "kube-api-access-ks8fn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.492771 4814 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/746a070b-3e04-4b4a-8e40-87a2cbc593ab-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.492829 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qlt8m\" (UniqueName: \"kubernetes.io/projected/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-kube-api-access-qlt8m\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.492851 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/746a070b-3e04-4b4a-8e40-87a2cbc593ab-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.492868 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.492885 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/85ca54c7-7833-4f46-bab1-f22f4e8bb55b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.492902 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ks8fn\" (UniqueName: \"kubernetes.io/projected/746a070b-3e04-4b4a-8e40-87a2cbc593ab-kube-api-access-ks8fn\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.492918 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/746a070b-3e04-4b4a-8e40-87a2cbc593ab-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.823725 4814 generic.go:334] "Generic (PLEG): container finished" podID="746a070b-3e04-4b4a-8e40-87a2cbc593ab" containerID="27f5edd0f0fb221672b4a0f9c3616005761e69c25e8f641a459210f346de8f68" exitCode=0 Jan 22 05:23:41 crc kubenswrapper[4814]: 
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.823791 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" event={"ID":"746a070b-3e04-4b4a-8e40-87a2cbc593ab","Type":"ContainerDied","Data":"27f5edd0f0fb221672b4a0f9c3616005761e69c25e8f641a459210f346de8f68"}
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.823824 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq"
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.823885 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq" event={"ID":"746a070b-3e04-4b4a-8e40-87a2cbc593ab","Type":"ContainerDied","Data":"ad2b873a36781ffb1ca7d0d22c288e09b5d5812aa5144c4ff4d44b0fe7c3b1ca"}
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.823918 4814 scope.go:117] "RemoveContainer" containerID="27f5edd0f0fb221672b4a0f9c3616005761e69c25e8f641a459210f346de8f68"
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.824898 4814 generic.go:334] "Generic (PLEG): container finished" podID="85ca54c7-7833-4f46-bab1-f22f4e8bb55b" containerID="776a67040b54092ba96da6f7ec153be6f487ab34894ba7000f8cf27b72169472" exitCode=0
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.824927 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" event={"ID":"85ca54c7-7833-4f46-bab1-f22f4e8bb55b","Type":"ContainerDied","Data":"776a67040b54092ba96da6f7ec153be6f487ab34894ba7000f8cf27b72169472"}
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.824953 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7" event={"ID":"85ca54c7-7833-4f46-bab1-f22f4e8bb55b","Type":"ContainerDied","Data":"175eacb2e7cdd6cdd64e267e2703f0ebd647cd153fba577eff755e0416a8093a"}
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.824992 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74586866d5-wwnd7"
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.844680 4814 scope.go:117] "RemoveContainer" containerID="27f5edd0f0fb221672b4a0f9c3616005761e69c25e8f641a459210f346de8f68"
Jan 22 05:23:41 crc kubenswrapper[4814]: E0122 05:23:41.845937 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27f5edd0f0fb221672b4a0f9c3616005761e69c25e8f641a459210f346de8f68\": container with ID starting with 27f5edd0f0fb221672b4a0f9c3616005761e69c25e8f641a459210f346de8f68 not found: ID does not exist" containerID="27f5edd0f0fb221672b4a0f9c3616005761e69c25e8f641a459210f346de8f68"
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.845996 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27f5edd0f0fb221672b4a0f9c3616005761e69c25e8f641a459210f346de8f68"} err="failed to get container status \"27f5edd0f0fb221672b4a0f9c3616005761e69c25e8f641a459210f346de8f68\": rpc error: code = NotFound desc = could not find container \"27f5edd0f0fb221672b4a0f9c3616005761e69c25e8f641a459210f346de8f68\": container with ID starting with 27f5edd0f0fb221672b4a0f9c3616005761e69c25e8f641a459210f346de8f68 not found: ID does not exist"
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.846032 4814 scope.go:117] "RemoveContainer" containerID="776a67040b54092ba96da6f7ec153be6f487ab34894ba7000f8cf27b72169472"
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.858443 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-74586866d5-wwnd7"]
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.869582 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-74586866d5-wwnd7"]
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.872173 4814 scope.go:117] "RemoveContainer" containerID="776a67040b54092ba96da6f7ec153be6f487ab34894ba7000f8cf27b72169472"
Jan 22 05:23:41 crc kubenswrapper[4814]: E0122 05:23:41.877689 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"776a67040b54092ba96da6f7ec153be6f487ab34894ba7000f8cf27b72169472\": container with ID starting with 776a67040b54092ba96da6f7ec153be6f487ab34894ba7000f8cf27b72169472 not found: ID does not exist" containerID="776a67040b54092ba96da6f7ec153be6f487ab34894ba7000f8cf27b72169472"
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.877751 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"776a67040b54092ba96da6f7ec153be6f487ab34894ba7000f8cf27b72169472"} err="failed to get container status \"776a67040b54092ba96da6f7ec153be6f487ab34894ba7000f8cf27b72169472\": rpc error: code = NotFound desc = could not find container \"776a67040b54092ba96da6f7ec153be6f487ab34894ba7000f8cf27b72169472\": container with ID starting with 776a67040b54092ba96da6f7ec153be6f487ab34894ba7000f8cf27b72169472 not found: ID does not exist"
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.879306 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq"]
Jan 22 05:23:41 crc kubenswrapper[4814]: I0122 05:23:41.884817 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-678cc6964f-6fhdq"]
Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.352866 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="746a070b-3e04-4b4a-8e40-87a2cbc593ab" path="/var/lib/kubelet/pods/746a070b-3e04-4b4a-8e40-87a2cbc593ab/volumes"
Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.353895 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85ca54c7-7833-4f46-bab1-f22f4e8bb55b" path="/var/lib/kubelet/pods/85ca54c7-7833-4f46-bab1-f22f4e8bb55b/volumes"
Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.455044 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5877696bb8-bbssz"]
Jan 22 05:23:42 crc kubenswrapper[4814]: E0122 05:23:42.455374 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="746a070b-3e04-4b4a-8e40-87a2cbc593ab" containerName="route-controller-manager"
Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.455399 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="746a070b-3e04-4b4a-8e40-87a2cbc593ab" containerName="route-controller-manager"
Jan 22 05:23:42 crc kubenswrapper[4814]: E0122 05:23:42.455435 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85ca54c7-7833-4f46-bab1-f22f4e8bb55b" containerName="controller-manager"
Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.455448 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="85ca54c7-7833-4f46-bab1-f22f4e8bb55b" containerName="controller-manager"
Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.455624 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="85ca54c7-7833-4f46-bab1-f22f4e8bb55b" containerName="controller-manager"
Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.455675 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="746a070b-3e04-4b4a-8e40-87a2cbc593ab" containerName="route-controller-manager"
Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.456220 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz"
Need to start a new one" pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.460949 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.464725 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.465872 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.472952 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.473006 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.474958 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.480805 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.486347 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5"] Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.487335 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.490529 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.490840 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.494316 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.494507 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.494676 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.494885 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.505786 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-config\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.505829 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-proxy-ca-bundles\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.505858 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5157834-16d9-4101-9510-5bb71f933c87-config\") pod \"route-controller-manager-5948fdc94d-sksg5\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.505890 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5eed773-3e19-43e5-b888-6defb74dcf9d-serving-cert\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.505911 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5157834-16d9-4101-9510-5bb71f933c87-serving-cert\") pod \"route-controller-manager-5948fdc94d-sksg5\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.505947 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-client-ca\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.505975 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b5157834-16d9-4101-9510-5bb71f933c87-client-ca\") pod \"route-controller-manager-5948fdc94d-sksg5\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.506014 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5ctw\" (UniqueName: \"kubernetes.io/projected/b5157834-16d9-4101-9510-5bb71f933c87-kube-api-access-p5ctw\") pod \"route-controller-manager-5948fdc94d-sksg5\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.506034 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttzxh\" (UniqueName: \"kubernetes.io/projected/a5eed773-3e19-43e5-b888-6defb74dcf9d-kube-api-access-ttzxh\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.508133 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5"] Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.511965 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5877696bb8-bbssz"] Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.606577 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5ctw\" (UniqueName: \"kubernetes.io/projected/b5157834-16d9-4101-9510-5bb71f933c87-kube-api-access-p5ctw\") pod \"route-controller-manager-5948fdc94d-sksg5\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.606640 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttzxh\" (UniqueName: \"kubernetes.io/projected/a5eed773-3e19-43e5-b888-6defb74dcf9d-kube-api-access-ttzxh\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.606678 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-config\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.606703 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-proxy-ca-bundles\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.606741 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5157834-16d9-4101-9510-5bb71f933c87-config\") pod \"route-controller-manager-5948fdc94d-sksg5\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.606801 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5eed773-3e19-43e5-b888-6defb74dcf9d-serving-cert\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.606826 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5157834-16d9-4101-9510-5bb71f933c87-serving-cert\") pod \"route-controller-manager-5948fdc94d-sksg5\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.606865 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-client-ca\") pod 
\"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.606908 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b5157834-16d9-4101-9510-5bb71f933c87-client-ca\") pod \"route-controller-manager-5948fdc94d-sksg5\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.608149 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b5157834-16d9-4101-9510-5bb71f933c87-client-ca\") pod \"route-controller-manager-5948fdc94d-sksg5\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.608527 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-config\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.608848 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-proxy-ca-bundles\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.608984 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-client-ca\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.608986 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5157834-16d9-4101-9510-5bb71f933c87-config\") pod \"route-controller-manager-5948fdc94d-sksg5\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.611739 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5157834-16d9-4101-9510-5bb71f933c87-serving-cert\") pod \"route-controller-manager-5948fdc94d-sksg5\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.616576 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5eed773-3e19-43e5-b888-6defb74dcf9d-serving-cert\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.627615 4814 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttzxh\" (UniqueName: \"kubernetes.io/projected/a5eed773-3e19-43e5-b888-6defb74dcf9d-kube-api-access-ttzxh\") pod \"controller-manager-5877696bb8-bbssz\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.629866 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5ctw\" (UniqueName: \"kubernetes.io/projected/b5157834-16d9-4101-9510-5bb71f933c87-kube-api-access-p5ctw\") pod \"route-controller-manager-5948fdc94d-sksg5\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.779785 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:42 crc kubenswrapper[4814]: I0122 05:23:42.820688 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:43 crc kubenswrapper[4814]: I0122 05:23:43.126539 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5877696bb8-bbssz"] Jan 22 05:23:43 crc kubenswrapper[4814]: I0122 05:23:43.230586 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5"] Jan 22 05:23:43 crc kubenswrapper[4814]: I0122 05:23:43.839721 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" event={"ID":"b5157834-16d9-4101-9510-5bb71f933c87","Type":"ContainerStarted","Data":"57c4323142ece715882e2c2dbdc35aa1f3e563ba0019373522ac2e044b9321fc"} Jan 22 05:23:43 crc kubenswrapper[4814]: I0122 05:23:43.839768 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" event={"ID":"b5157834-16d9-4101-9510-5bb71f933c87","Type":"ContainerStarted","Data":"67d18c8687472c6407570a9b5cd1cbb7964c9bf8109a0e09a724fd12dc6f2f2f"} Jan 22 05:23:43 crc kubenswrapper[4814]: I0122 05:23:43.839895 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:43 crc kubenswrapper[4814]: I0122 05:23:43.841574 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" event={"ID":"a5eed773-3e19-43e5-b888-6defb74dcf9d","Type":"ContainerStarted","Data":"fdff93da25b3cccea0dd49f4cb861334210eb664a1529ba6815cfef4b1eb2cff"} Jan 22 05:23:43 crc kubenswrapper[4814]: I0122 05:23:43.841603 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" event={"ID":"a5eed773-3e19-43e5-b888-6defb74dcf9d","Type":"ContainerStarted","Data":"32bef85d9412b60bfdf9bad78277bf81f44be93f3dabd73b93e88d491e006f24"} Jan 22 05:23:43 crc kubenswrapper[4814]: I0122 05:23:43.842151 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:43 crc kubenswrapper[4814]: I0122 05:23:43.849600 4814 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:23:43 crc kubenswrapper[4814]: I0122 05:23:43.858009 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" podStartSLOduration=3.85799319 podStartE2EDuration="3.85799319s" podCreationTimestamp="2026-01-22 05:23:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:23:43.85606204 +0000 UTC m=+309.939550255" watchObservedRunningTime="2026-01-22 05:23:43.85799319 +0000 UTC m=+309.941481405" Jan 22 05:23:43 crc kubenswrapper[4814]: I0122 05:23:43.874693 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" podStartSLOduration=3.8746789059999998 podStartE2EDuration="3.874678906s" podCreationTimestamp="2026-01-22 05:23:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:23:43.873945824 +0000 UTC m=+309.957434039" watchObservedRunningTime="2026-01-22 05:23:43.874678906 +0000 UTC m=+309.958167121" Jan 22 05:23:44 crc kubenswrapper[4814]: I0122 05:23:44.053429 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:23:50 crc kubenswrapper[4814]: I0122 05:23:50.192757 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.154576 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wz7gh"] Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.156652 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.158939 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.175873 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wz7gh"] Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.309174 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7c0e6cd-e22a-4560-b49c-95c421b55f58-utilities\") pod \"redhat-operators-wz7gh\" (UID: \"e7c0e6cd-e22a-4560-b49c-95c421b55f58\") " pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.309313 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72d7c\" (UniqueName: \"kubernetes.io/projected/e7c0e6cd-e22a-4560-b49c-95c421b55f58-kube-api-access-72d7c\") pod \"redhat-operators-wz7gh\" (UID: \"e7c0e6cd-e22a-4560-b49c-95c421b55f58\") " pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.309400 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7c0e6cd-e22a-4560-b49c-95c421b55f58-catalog-content\") pod \"redhat-operators-wz7gh\" (UID: \"e7c0e6cd-e22a-4560-b49c-95c421b55f58\") " pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.376071 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dmdkw"] Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.378492 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.386010 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.390304 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmdkw"] Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.412860 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72d7c\" (UniqueName: \"kubernetes.io/projected/e7c0e6cd-e22a-4560-b49c-95c421b55f58-kube-api-access-72d7c\") pod \"redhat-operators-wz7gh\" (UID: \"e7c0e6cd-e22a-4560-b49c-95c421b55f58\") " pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.413147 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7c0e6cd-e22a-4560-b49c-95c421b55f58-catalog-content\") pod \"redhat-operators-wz7gh\" (UID: \"e7c0e6cd-e22a-4560-b49c-95c421b55f58\") " pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.413298 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7c0e6cd-e22a-4560-b49c-95c421b55f58-utilities\") pod \"redhat-operators-wz7gh\" (UID: \"e7c0e6cd-e22a-4560-b49c-95c421b55f58\") " pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.413857 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e7c0e6cd-e22a-4560-b49c-95c421b55f58-catalog-content\") pod \"redhat-operators-wz7gh\" (UID: \"e7c0e6cd-e22a-4560-b49c-95c421b55f58\") " pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.413894 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e7c0e6cd-e22a-4560-b49c-95c421b55f58-utilities\") pod \"redhat-operators-wz7gh\" (UID: \"e7c0e6cd-e22a-4560-b49c-95c421b55f58\") " pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.438241 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72d7c\" (UniqueName: \"kubernetes.io/projected/e7c0e6cd-e22a-4560-b49c-95c421b55f58-kube-api-access-72d7c\") pod \"redhat-operators-wz7gh\" (UID: \"e7c0e6cd-e22a-4560-b49c-95c421b55f58\") " pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.489186 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.514803 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbmmt\" (UniqueName: \"kubernetes.io/projected/839ce24a-8b54-4604-9f62-0b9ada747d8f-kube-api-access-rbmmt\") pod \"redhat-marketplace-dmdkw\" (UID: \"839ce24a-8b54-4604-9f62-0b9ada747d8f\") " pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.515219 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/839ce24a-8b54-4604-9f62-0b9ada747d8f-utilities\") pod \"redhat-marketplace-dmdkw\" (UID: \"839ce24a-8b54-4604-9f62-0b9ada747d8f\") " pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.515259 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/839ce24a-8b54-4604-9f62-0b9ada747d8f-catalog-content\") pod \"redhat-marketplace-dmdkw\" (UID: \"839ce24a-8b54-4604-9f62-0b9ada747d8f\") " pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.616409 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbmmt\" (UniqueName: \"kubernetes.io/projected/839ce24a-8b54-4604-9f62-0b9ada747d8f-kube-api-access-rbmmt\") pod \"redhat-marketplace-dmdkw\" (UID: \"839ce24a-8b54-4604-9f62-0b9ada747d8f\") " pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.616463 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/839ce24a-8b54-4604-9f62-0b9ada747d8f-utilities\") pod \"redhat-marketplace-dmdkw\" (UID: \"839ce24a-8b54-4604-9f62-0b9ada747d8f\") " pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.616486 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/839ce24a-8b54-4604-9f62-0b9ada747d8f-catalog-content\") pod \"redhat-marketplace-dmdkw\" (UID: \"839ce24a-8b54-4604-9f62-0b9ada747d8f\") " pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.616996 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/839ce24a-8b54-4604-9f62-0b9ada747d8f-catalog-content\") pod \"redhat-marketplace-dmdkw\" (UID: \"839ce24a-8b54-4604-9f62-0b9ada747d8f\") " pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.617251 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/839ce24a-8b54-4604-9f62-0b9ada747d8f-utilities\") pod \"redhat-marketplace-dmdkw\" (UID: \"839ce24a-8b54-4604-9f62-0b9ada747d8f\") " pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.649379 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbmmt\" (UniqueName: \"kubernetes.io/projected/839ce24a-8b54-4604-9f62-0b9ada747d8f-kube-api-access-rbmmt\") pod 
\"redhat-marketplace-dmdkw\" (UID: \"839ce24a-8b54-4604-9f62-0b9ada747d8f\") " pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.712483 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.887975 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wz7gh"] Jan 22 05:23:57 crc kubenswrapper[4814]: I0122 05:23:57.930157 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wz7gh" event={"ID":"e7c0e6cd-e22a-4560-b49c-95c421b55f58","Type":"ContainerStarted","Data":"bb31b2367d39fff0621739dd0fc84f36b07d48f301e11680229e3f701f5610d0"} Jan 22 05:23:58 crc kubenswrapper[4814]: E0122 05:23:58.095592 4814 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode7c0e6cd_e22a_4560_b49c_95c421b55f58.slice/crio-5751bd5e51ec33759a3fb31a1f6034a0585324bebd8ad2cf645a5da37648765c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode7c0e6cd_e22a_4560_b49c_95c421b55f58.slice/crio-conmon-5751bd5e51ec33759a3fb31a1f6034a0585324bebd8ad2cf645a5da37648765c.scope\": RecentStats: unable to find data in memory cache]" Jan 22 05:23:58 crc kubenswrapper[4814]: I0122 05:23:58.168753 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dmdkw"] Jan 22 05:23:58 crc kubenswrapper[4814]: W0122 05:23:58.180477 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod839ce24a_8b54_4604_9f62_0b9ada747d8f.slice/crio-317a8df27e9ec0e6923ea01ded0b85bdbf21b0e9aa73b38ba4c1648a7165b00a WatchSource:0}: Error finding container 317a8df27e9ec0e6923ea01ded0b85bdbf21b0e9aa73b38ba4c1648a7165b00a: Status 404 returned error can't find the container with id 317a8df27e9ec0e6923ea01ded0b85bdbf21b0e9aa73b38ba4c1648a7165b00a Jan 22 05:23:58 crc kubenswrapper[4814]: I0122 05:23:58.939907 4814 generic.go:334] "Generic (PLEG): container finished" podID="e7c0e6cd-e22a-4560-b49c-95c421b55f58" containerID="5751bd5e51ec33759a3fb31a1f6034a0585324bebd8ad2cf645a5da37648765c" exitCode=0 Jan 22 05:23:58 crc kubenswrapper[4814]: I0122 05:23:58.940591 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wz7gh" event={"ID":"e7c0e6cd-e22a-4560-b49c-95c421b55f58","Type":"ContainerDied","Data":"5751bd5e51ec33759a3fb31a1f6034a0585324bebd8ad2cf645a5da37648765c"} Jan 22 05:23:58 crc kubenswrapper[4814]: I0122 05:23:58.945710 4814 generic.go:334] "Generic (PLEG): container finished" podID="839ce24a-8b54-4604-9f62-0b9ada747d8f" containerID="1e2eb05267d0792dc96b67fdfb0ce349c08a32e14f83d43ef3227970b897287c" exitCode=0 Jan 22 05:23:58 crc kubenswrapper[4814]: I0122 05:23:58.945769 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmdkw" event={"ID":"839ce24a-8b54-4604-9f62-0b9ada747d8f","Type":"ContainerDied","Data":"1e2eb05267d0792dc96b67fdfb0ce349c08a32e14f83d43ef3227970b897287c"} Jan 22 05:23:58 crc kubenswrapper[4814]: I0122 05:23:58.949434 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmdkw" 
event={"ID":"839ce24a-8b54-4604-9f62-0b9ada747d8f","Type":"ContainerStarted","Data":"317a8df27e9ec0e6923ea01ded0b85bdbf21b0e9aa73b38ba4c1648a7165b00a"} Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.554697 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gmgz8"] Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.560463 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.563808 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.579993 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gmgz8"] Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.748423 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldczz\" (UniqueName: \"kubernetes.io/projected/b9decba6-bec3-4a0f-9d81-37b513a97ad2-kube-api-access-ldczz\") pod \"certified-operators-gmgz8\" (UID: \"b9decba6-bec3-4a0f-9d81-37b513a97ad2\") " pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.748476 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9decba6-bec3-4a0f-9d81-37b513a97ad2-utilities\") pod \"certified-operators-gmgz8\" (UID: \"b9decba6-bec3-4a0f-9d81-37b513a97ad2\") " pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.748507 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9decba6-bec3-4a0f-9d81-37b513a97ad2-catalog-content\") pod \"certified-operators-gmgz8\" (UID: \"b9decba6-bec3-4a0f-9d81-37b513a97ad2\") " pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.761593 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-g5zdq"] Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.763498 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.770039 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g5zdq"] Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.776682 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.849699 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9decba6-bec3-4a0f-9d81-37b513a97ad2-utilities\") pod \"certified-operators-gmgz8\" (UID: \"b9decba6-bec3-4a0f-9d81-37b513a97ad2\") " pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.849757 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldczz\" (UniqueName: \"kubernetes.io/projected/b9decba6-bec3-4a0f-9d81-37b513a97ad2-kube-api-access-ldczz\") pod \"certified-operators-gmgz8\" (UID: \"b9decba6-bec3-4a0f-9d81-37b513a97ad2\") " pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.849807 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9decba6-bec3-4a0f-9d81-37b513a97ad2-catalog-content\") pod \"certified-operators-gmgz8\" (UID: \"b9decba6-bec3-4a0f-9d81-37b513a97ad2\") " pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.850390 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9decba6-bec3-4a0f-9d81-37b513a97ad2-catalog-content\") pod \"certified-operators-gmgz8\" (UID: \"b9decba6-bec3-4a0f-9d81-37b513a97ad2\") " pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.850760 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9decba6-bec3-4a0f-9d81-37b513a97ad2-utilities\") pod \"certified-operators-gmgz8\" (UID: \"b9decba6-bec3-4a0f-9d81-37b513a97ad2\") " pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.874594 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldczz\" (UniqueName: \"kubernetes.io/projected/b9decba6-bec3-4a0f-9d81-37b513a97ad2-kube-api-access-ldczz\") pod \"certified-operators-gmgz8\" (UID: \"b9decba6-bec3-4a0f-9d81-37b513a97ad2\") " pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.892292 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.954100 4814 generic.go:334] "Generic (PLEG): container finished" podID="839ce24a-8b54-4604-9f62-0b9ada747d8f" containerID="d1ed3417b9f278d61fb54bd7ad2adeeedc2b237dc067c7e055a5b49d757044f2" exitCode=0 Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.954168 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmdkw" event={"ID":"839ce24a-8b54-4604-9f62-0b9ada747d8f","Type":"ContainerDied","Data":"d1ed3417b9f278d61fb54bd7ad2adeeedc2b237dc067c7e055a5b49d757044f2"} Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.954964 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmx86\" (UniqueName: \"kubernetes.io/projected/56872458-6815-4696-9016-c033137187f0-kube-api-access-nmx86\") pod \"community-operators-g5zdq\" (UID: \"56872458-6815-4696-9016-c033137187f0\") " pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.955026 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56872458-6815-4696-9016-c033137187f0-catalog-content\") pod \"community-operators-g5zdq\" (UID: \"56872458-6815-4696-9016-c033137187f0\") " pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.955083 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56872458-6815-4696-9016-c033137187f0-utilities\") pod \"community-operators-g5zdq\" (UID: \"56872458-6815-4696-9016-c033137187f0\") " pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:23:59 crc kubenswrapper[4814]: I0122 05:23:59.959697 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wz7gh" event={"ID":"e7c0e6cd-e22a-4560-b49c-95c421b55f58","Type":"ContainerStarted","Data":"d7268e24260b8eec8ff94083bc0e32fa12ba6c72c2376d27afb6682ad4ca3fb3"} Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.056057 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmx86\" (UniqueName: \"kubernetes.io/projected/56872458-6815-4696-9016-c033137187f0-kube-api-access-nmx86\") pod \"community-operators-g5zdq\" (UID: \"56872458-6815-4696-9016-c033137187f0\") " pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.056417 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56872458-6815-4696-9016-c033137187f0-catalog-content\") pod \"community-operators-g5zdq\" (UID: \"56872458-6815-4696-9016-c033137187f0\") " pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.056885 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56872458-6815-4696-9016-c033137187f0-catalog-content\") pod \"community-operators-g5zdq\" (UID: \"56872458-6815-4696-9016-c033137187f0\") " pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.056959 4814 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56872458-6815-4696-9016-c033137187f0-utilities\") pod \"community-operators-g5zdq\" (UID: \"56872458-6815-4696-9016-c033137187f0\") " pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.057298 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56872458-6815-4696-9016-c033137187f0-utilities\") pod \"community-operators-g5zdq\" (UID: \"56872458-6815-4696-9016-c033137187f0\") " pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.077488 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmx86\" (UniqueName: \"kubernetes.io/projected/56872458-6815-4696-9016-c033137187f0-kube-api-access-nmx86\") pod \"community-operators-g5zdq\" (UID: \"56872458-6815-4696-9016-c033137187f0\") " pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.128593 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.302652 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gmgz8"] Jan 22 05:24:00 crc kubenswrapper[4814]: W0122 05:24:00.309693 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9decba6_bec3_4a0f_9d81_37b513a97ad2.slice/crio-6adf46ed323d072dc16ca0c1f43cd654452fcee57512c89e1f26be815e18132a WatchSource:0}: Error finding container 6adf46ed323d072dc16ca0c1f43cd654452fcee57512c89e1f26be815e18132a: Status 404 returned error can't find the container with id 6adf46ed323d072dc16ca0c1f43cd654452fcee57512c89e1f26be815e18132a Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.459498 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-gdnn2"] Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.460466 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.485311 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-gdnn2"] Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.571402 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6g6k\" (UniqueName: \"kubernetes.io/projected/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-kube-api-access-f6g6k\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.571476 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-ca-trust-extracted\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.571514 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-installation-pull-secrets\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.571531 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-bound-sa-token\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.571551 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-trusted-ca\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.571572 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-registry-tls\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.571614 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.571654 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-registry-certificates\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.599397 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.627205 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g5zdq"] Jan 22 05:24:00 crc kubenswrapper[4814]: W0122 05:24:00.647834 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56872458_6815_4696_9016_c033137187f0.slice/crio-1f594f8a2f7a10576d76cc7e4caadbfd1c87e77edb89623e743e2a1b51291851 WatchSource:0}: Error finding container 1f594f8a2f7a10576d76cc7e4caadbfd1c87e77edb89623e743e2a1b51291851: Status 404 returned error can't find the container with id 1f594f8a2f7a10576d76cc7e4caadbfd1c87e77edb89623e743e2a1b51291851 Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.672777 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-registry-certificates\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.672838 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6g6k\" (UniqueName: \"kubernetes.io/projected/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-kube-api-access-f6g6k\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.672862 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-ca-trust-extracted\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.672891 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-installation-pull-secrets\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.672907 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-bound-sa-token\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 
Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.672948 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-registry-tls\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2"
Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.673835 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-ca-trust-extracted\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2"
Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.674760 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-trusted-ca\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2"
Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.675015 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-registry-certificates\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2"
Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.678503 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-registry-tls\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2"
Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.680507 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-installation-pull-secrets\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2"
Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.693491 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6g6k\" (UniqueName: \"kubernetes.io/projected/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-kube-api-access-f6g6k\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2"
Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.698851 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f-bound-sa-token\") pod \"image-registry-66df7c8f76-gdnn2\" (UID: \"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f\") " pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2"
pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.796876 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.972358 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dmdkw" event={"ID":"839ce24a-8b54-4604-9f62-0b9ada747d8f","Type":"ContainerStarted","Data":"e5bcbfa364bed0a5904358f1d9d07eca94c43212f19ab569edebc4d5145e88a1"} Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.979017 4814 generic.go:334] "Generic (PLEG): container finished" podID="e7c0e6cd-e22a-4560-b49c-95c421b55f58" containerID="d7268e24260b8eec8ff94083bc0e32fa12ba6c72c2376d27afb6682ad4ca3fb3" exitCode=0 Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.979087 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wz7gh" event={"ID":"e7c0e6cd-e22a-4560-b49c-95c421b55f58","Type":"ContainerDied","Data":"d7268e24260b8eec8ff94083bc0e32fa12ba6c72c2376d27afb6682ad4ca3fb3"} Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.985987 4814 generic.go:334] "Generic (PLEG): container finished" podID="b9decba6-bec3-4a0f-9d81-37b513a97ad2" containerID="f59b2ac3a0e7afc17799f9d30106a286e4adc1a5816a0ae7d2a35b1ce5cf129c" exitCode=0 Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.986112 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmgz8" event={"ID":"b9decba6-bec3-4a0f-9d81-37b513a97ad2","Type":"ContainerDied","Data":"f59b2ac3a0e7afc17799f9d30106a286e4adc1a5816a0ae7d2a35b1ce5cf129c"} Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.986178 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmgz8" event={"ID":"b9decba6-bec3-4a0f-9d81-37b513a97ad2","Type":"ContainerStarted","Data":"6adf46ed323d072dc16ca0c1f43cd654452fcee57512c89e1f26be815e18132a"} Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.989695 4814 generic.go:334] "Generic (PLEG): container finished" podID="56872458-6815-4696-9016-c033137187f0" containerID="709edcad206bc30fd728840720629778d1bde260101fe7c9e3807e1eef3c5f36" exitCode=0 Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.989735 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5zdq" event={"ID":"56872458-6815-4696-9016-c033137187f0","Type":"ContainerDied","Data":"709edcad206bc30fd728840720629778d1bde260101fe7c9e3807e1eef3c5f36"} Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.989768 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5zdq" event={"ID":"56872458-6815-4696-9016-c033137187f0","Type":"ContainerStarted","Data":"1f594f8a2f7a10576d76cc7e4caadbfd1c87e77edb89623e743e2a1b51291851"} Jan 22 05:24:00 crc kubenswrapper[4814]: I0122 05:24:00.992666 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dmdkw" podStartSLOduration=2.54062985 podStartE2EDuration="3.992650704s" podCreationTimestamp="2026-01-22 05:23:57 +0000 UTC" firstStartedPulling="2026-01-22 05:23:58.949581457 +0000 UTC m=+325.033069702" lastFinishedPulling="2026-01-22 05:24:00.401602341 +0000 UTC m=+326.485090556" observedRunningTime="2026-01-22 05:24:00.990324152 +0000 UTC m=+327.073812367" watchObservedRunningTime="2026-01-22 
05:24:00.992650704 +0000 UTC m=+327.076138919" Jan 22 05:24:01 crc kubenswrapper[4814]: I0122 05:24:01.187181 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-gdnn2"] Jan 22 05:24:01 crc kubenswrapper[4814]: W0122 05:24:01.197841 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e7bfc4a_4ba2_4ab0_be68_2f4689c7d86f.slice/crio-d2f7d9ab68e99af90f03434e20b43919a4920d408e11267302d8ff8115ba97ca WatchSource:0}: Error finding container d2f7d9ab68e99af90f03434e20b43919a4920d408e11267302d8ff8115ba97ca: Status 404 returned error can't find the container with id d2f7d9ab68e99af90f03434e20b43919a4920d408e11267302d8ff8115ba97ca Jan 22 05:24:01 crc kubenswrapper[4814]: I0122 05:24:01.999291 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wz7gh" event={"ID":"e7c0e6cd-e22a-4560-b49c-95c421b55f58","Type":"ContainerStarted","Data":"b0850eb379f4a554b0410578a8118a843a895beb4c89a2a5f75bbee084c6a388"} Jan 22 05:24:02 crc kubenswrapper[4814]: I0122 05:24:02.000847 4814 generic.go:334] "Generic (PLEG): container finished" podID="b9decba6-bec3-4a0f-9d81-37b513a97ad2" containerID="0ddfeffa4aa760b162b1a73354422917957396f69124f8782c1562133db68cbf" exitCode=0 Jan 22 05:24:02 crc kubenswrapper[4814]: I0122 05:24:02.000923 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmgz8" event={"ID":"b9decba6-bec3-4a0f-9d81-37b513a97ad2","Type":"ContainerDied","Data":"0ddfeffa4aa760b162b1a73354422917957396f69124f8782c1562133db68cbf"} Jan 22 05:24:02 crc kubenswrapper[4814]: I0122 05:24:02.003843 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5zdq" event={"ID":"56872458-6815-4696-9016-c033137187f0","Type":"ContainerStarted","Data":"64e7248a791c56806bff1bb1a0d89362d4dd98bd35739e72dab4cfec30de23fd"} Jan 22 05:24:02 crc kubenswrapper[4814]: I0122 05:24:02.005813 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" event={"ID":"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f","Type":"ContainerStarted","Data":"e3f5f7aeaacfa2f27473a5e101d3ad51eb25a2c3c17b011e84a200df4c9d4a07"} Jan 22 05:24:02 crc kubenswrapper[4814]: I0122 05:24:02.005845 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" event={"ID":"8e7bfc4a-4ba2-4ab0-be68-2f4689c7d86f","Type":"ContainerStarted","Data":"d2f7d9ab68e99af90f03434e20b43919a4920d408e11267302d8ff8115ba97ca"} Jan 22 05:24:02 crc kubenswrapper[4814]: I0122 05:24:02.044990 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wz7gh" podStartSLOduration=2.650873087 podStartE2EDuration="5.044972232s" podCreationTimestamp="2026-01-22 05:23:57 +0000 UTC" firstStartedPulling="2026-01-22 05:23:58.943433267 +0000 UTC m=+325.026921512" lastFinishedPulling="2026-01-22 05:24:01.337532442 +0000 UTC m=+327.421020657" observedRunningTime="2026-01-22 05:24:02.029872496 +0000 UTC m=+328.113360711" watchObservedRunningTime="2026-01-22 05:24:02.044972232 +0000 UTC m=+328.128460447" Jan 22 05:24:02 crc kubenswrapper[4814]: I0122 05:24:02.078743 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" podStartSLOduration=2.078729423 podStartE2EDuration="2.078729423s" 
podCreationTimestamp="2026-01-22 05:24:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:24:02.075585006 +0000 UTC m=+328.159073211" watchObservedRunningTime="2026-01-22 05:24:02.078729423 +0000 UTC m=+328.162217638" Jan 22 05:24:03 crc kubenswrapper[4814]: I0122 05:24:03.013303 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmgz8" event={"ID":"b9decba6-bec3-4a0f-9d81-37b513a97ad2","Type":"ContainerStarted","Data":"b565ffcaac7c7c760d1fa47759e5371b59563d4f3f0a31ffb7aff22e59fd8491"} Jan 22 05:24:03 crc kubenswrapper[4814]: I0122 05:24:03.016050 4814 generic.go:334] "Generic (PLEG): container finished" podID="56872458-6815-4696-9016-c033137187f0" containerID="64e7248a791c56806bff1bb1a0d89362d4dd98bd35739e72dab4cfec30de23fd" exitCode=0 Jan 22 05:24:03 crc kubenswrapper[4814]: I0122 05:24:03.016098 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5zdq" event={"ID":"56872458-6815-4696-9016-c033137187f0","Type":"ContainerDied","Data":"64e7248a791c56806bff1bb1a0d89362d4dd98bd35739e72dab4cfec30de23fd"} Jan 22 05:24:03 crc kubenswrapper[4814]: I0122 05:24:03.016554 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:03 crc kubenswrapper[4814]: I0122 05:24:03.040724 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gmgz8" podStartSLOduration=2.623808305 podStartE2EDuration="4.040704052s" podCreationTimestamp="2026-01-22 05:23:59 +0000 UTC" firstStartedPulling="2026-01-22 05:24:00.987247726 +0000 UTC m=+327.070735941" lastFinishedPulling="2026-01-22 05:24:02.404143443 +0000 UTC m=+328.487631688" observedRunningTime="2026-01-22 05:24:03.039260678 +0000 UTC m=+329.122748933" watchObservedRunningTime="2026-01-22 05:24:03.040704052 +0000 UTC m=+329.124192267" Jan 22 05:24:04 crc kubenswrapper[4814]: I0122 05:24:04.024035 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5zdq" event={"ID":"56872458-6815-4696-9016-c033137187f0","Type":"ContainerStarted","Data":"4a8697eb77831ab70909c2a60235c78da99301179f434224223233d4f4ad3654"} Jan 22 05:24:04 crc kubenswrapper[4814]: I0122 05:24:04.048185 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-g5zdq" podStartSLOduration=2.576246028 podStartE2EDuration="5.048168014s" podCreationTimestamp="2026-01-22 05:23:59 +0000 UTC" firstStartedPulling="2026-01-22 05:24:00.992950493 +0000 UTC m=+327.076438708" lastFinishedPulling="2026-01-22 05:24:03.464872479 +0000 UTC m=+329.548360694" observedRunningTime="2026-01-22 05:24:04.04703766 +0000 UTC m=+330.130525885" watchObservedRunningTime="2026-01-22 05:24:04.048168014 +0000 UTC m=+330.131656229" Jan 22 05:24:07 crc kubenswrapper[4814]: I0122 05:24:07.489539 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:24:07 crc kubenswrapper[4814]: I0122 05:24:07.490863 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:24:07 crc kubenswrapper[4814]: I0122 05:24:07.713322 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:24:07 crc kubenswrapper[4814]: I0122 05:24:07.713423 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:24:07 crc kubenswrapper[4814]: I0122 05:24:07.759689 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:24:08 crc kubenswrapper[4814]: I0122 05:24:08.089802 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dmdkw" Jan 22 05:24:08 crc kubenswrapper[4814]: I0122 05:24:08.542271 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wz7gh" podUID="e7c0e6cd-e22a-4560-b49c-95c421b55f58" containerName="registry-server" probeResult="failure" output=< Jan 22 05:24:08 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 05:24:08 crc kubenswrapper[4814]: > Jan 22 05:24:09 crc kubenswrapper[4814]: I0122 05:24:09.893037 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:24:09 crc kubenswrapper[4814]: I0122 05:24:09.893388 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:24:09 crc kubenswrapper[4814]: I0122 05:24:09.971374 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:24:10 crc kubenswrapper[4814]: I0122 05:24:10.118688 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gmgz8" Jan 22 05:24:10 crc kubenswrapper[4814]: I0122 05:24:10.128966 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:24:10 crc kubenswrapper[4814]: I0122 05:24:10.129042 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:24:10 crc kubenswrapper[4814]: I0122 05:24:10.182521 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:24:11 crc kubenswrapper[4814]: I0122 05:24:11.110765 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-g5zdq" Jan 22 05:24:13 crc kubenswrapper[4814]: I0122 05:24:13.577374 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5"] Jan 22 05:24:13 crc kubenswrapper[4814]: I0122 05:24:13.577711 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" podUID="b5157834-16d9-4101-9510-5bb71f933c87" containerName="route-controller-manager" containerID="cri-o://57c4323142ece715882e2c2dbdc35aa1f3e563ba0019373522ac2e044b9321fc" gracePeriod=30 Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.099033 4814 generic.go:334] "Generic (PLEG): container finished" podID="b5157834-16d9-4101-9510-5bb71f933c87" containerID="57c4323142ece715882e2c2dbdc35aa1f3e563ba0019373522ac2e044b9321fc" exitCode=0 Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.099143 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" event={"ID":"b5157834-16d9-4101-9510-5bb71f933c87","Type":"ContainerDied","Data":"57c4323142ece715882e2c2dbdc35aa1f3e563ba0019373522ac2e044b9321fc"} Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.162295 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.204404 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq"] Jan 22 05:24:15 crc kubenswrapper[4814]: E0122 05:24:15.204654 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5157834-16d9-4101-9510-5bb71f933c87" containerName="route-controller-manager" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.204666 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5157834-16d9-4101-9510-5bb71f933c87" containerName="route-controller-manager" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.204793 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5157834-16d9-4101-9510-5bb71f933c87" containerName="route-controller-manager" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.205186 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.214610 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq"] Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.313859 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5157834-16d9-4101-9510-5bb71f933c87-serving-cert\") pod \"b5157834-16d9-4101-9510-5bb71f933c87\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.313965 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b5157834-16d9-4101-9510-5bb71f933c87-client-ca\") pod \"b5157834-16d9-4101-9510-5bb71f933c87\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.314017 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5ctw\" (UniqueName: \"kubernetes.io/projected/b5157834-16d9-4101-9510-5bb71f933c87-kube-api-access-p5ctw\") pod \"b5157834-16d9-4101-9510-5bb71f933c87\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.314111 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5157834-16d9-4101-9510-5bb71f933c87-config\") pod \"b5157834-16d9-4101-9510-5bb71f933c87\" (UID: \"b5157834-16d9-4101-9510-5bb71f933c87\") " Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.314410 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f-serving-cert\") pod \"route-controller-manager-d69f788fd-42cqq\" (UID: \"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f\") " pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" 
Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.314491 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45mfb\" (UniqueName: \"kubernetes.io/projected/09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f-kube-api-access-45mfb\") pod \"route-controller-manager-d69f788fd-42cqq\" (UID: \"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f\") " pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq"
Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.314582 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f-config\") pod \"route-controller-manager-d69f788fd-42cqq\" (UID: \"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f\") " pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq"
Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.314737 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f-client-ca\") pod \"route-controller-manager-d69f788fd-42cqq\" (UID: \"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f\") " pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq"
Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.314942 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5157834-16d9-4101-9510-5bb71f933c87-client-ca" (OuterVolumeSpecName: "client-ca") pod "b5157834-16d9-4101-9510-5bb71f933c87" (UID: "b5157834-16d9-4101-9510-5bb71f933c87"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.315349 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5157834-16d9-4101-9510-5bb71f933c87-config" (OuterVolumeSpecName: "config") pod "b5157834-16d9-4101-9510-5bb71f933c87" (UID: "b5157834-16d9-4101-9510-5bb71f933c87"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.323670 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5157834-16d9-4101-9510-5bb71f933c87-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b5157834-16d9-4101-9510-5bb71f933c87" (UID: "b5157834-16d9-4101-9510-5bb71f933c87"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.324960 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5157834-16d9-4101-9510-5bb71f933c87-kube-api-access-p5ctw" (OuterVolumeSpecName: "kube-api-access-p5ctw") pod "b5157834-16d9-4101-9510-5bb71f933c87" (UID: "b5157834-16d9-4101-9510-5bb71f933c87"). InnerVolumeSpecName "kube-api-access-p5ctw". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.416588 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f-config\") pod \"route-controller-manager-d69f788fd-42cqq\" (UID: \"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f\") " pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.417088 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f-client-ca\") pod \"route-controller-manager-d69f788fd-42cqq\" (UID: \"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f\") " pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.417301 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f-serving-cert\") pod \"route-controller-manager-d69f788fd-42cqq\" (UID: \"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f\") " pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.417370 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45mfb\" (UniqueName: \"kubernetes.io/projected/09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f-kube-api-access-45mfb\") pod \"route-controller-manager-d69f788fd-42cqq\" (UID: \"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f\") " pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.417458 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5157834-16d9-4101-9510-5bb71f933c87-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.417475 4814 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b5157834-16d9-4101-9510-5bb71f933c87-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.417488 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5ctw\" (UniqueName: \"kubernetes.io/projected/b5157834-16d9-4101-9510-5bb71f933c87-kube-api-access-p5ctw\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.417502 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5157834-16d9-4101-9510-5bb71f933c87-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.418293 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f-config\") pod \"route-controller-manager-d69f788fd-42cqq\" (UID: \"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f\") " pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.420643 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f-client-ca\") pod \"route-controller-manager-d69f788fd-42cqq\" (UID: 
\"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f\") " pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.437862 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f-serving-cert\") pod \"route-controller-manager-d69f788fd-42cqq\" (UID: \"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f\") " pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.445801 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45mfb\" (UniqueName: \"kubernetes.io/projected/09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f-kube-api-access-45mfb\") pod \"route-controller-manager-d69f788fd-42cqq\" (UID: \"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f\") " pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.533326 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" Jan 22 05:24:15 crc kubenswrapper[4814]: I0122 05:24:15.944548 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq"] Jan 22 05:24:15 crc kubenswrapper[4814]: W0122 05:24:15.950473 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod09cb0b4c_2c2f_4b1b_8bf2_cf5690ad105f.slice/crio-8afbf227ef1c22b81fb516ff05a74c095459c3ed31fc045b7e7efaafaaf29d18 WatchSource:0}: Error finding container 8afbf227ef1c22b81fb516ff05a74c095459c3ed31fc045b7e7efaafaaf29d18: Status 404 returned error can't find the container with id 8afbf227ef1c22b81fb516ff05a74c095459c3ed31fc045b7e7efaafaaf29d18 Jan 22 05:24:16 crc kubenswrapper[4814]: I0122 05:24:16.108804 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" event={"ID":"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f","Type":"ContainerStarted","Data":"8afbf227ef1c22b81fb516ff05a74c095459c3ed31fc045b7e7efaafaaf29d18"} Jan 22 05:24:16 crc kubenswrapper[4814]: I0122 05:24:16.111670 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" event={"ID":"b5157834-16d9-4101-9510-5bb71f933c87","Type":"ContainerDied","Data":"67d18c8687472c6407570a9b5cd1cbb7964c9bf8109a0e09a724fd12dc6f2f2f"} Jan 22 05:24:16 crc kubenswrapper[4814]: I0122 05:24:16.111748 4814 scope.go:117] "RemoveContainer" containerID="57c4323142ece715882e2c2dbdc35aa1f3e563ba0019373522ac2e044b9321fc" Jan 22 05:24:16 crc kubenswrapper[4814]: I0122 05:24:16.112006 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5" Jan 22 05:24:16 crc kubenswrapper[4814]: I0122 05:24:16.183090 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5"] Jan 22 05:24:16 crc kubenswrapper[4814]: I0122 05:24:16.186901 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5948fdc94d-sksg5"] Jan 22 05:24:16 crc kubenswrapper[4814]: I0122 05:24:16.354622 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5157834-16d9-4101-9510-5bb71f933c87" path="/var/lib/kubelet/pods/b5157834-16d9-4101-9510-5bb71f933c87/volumes" Jan 22 05:24:17 crc kubenswrapper[4814]: I0122 05:24:17.123547 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" event={"ID":"09cb0b4c-2c2f-4b1b-8bf2-cf5690ad105f","Type":"ContainerStarted","Data":"8914647e576e3297258618f49359fb63cf7097bc42db6010b19fc60f347731eb"} Jan 22 05:24:17 crc kubenswrapper[4814]: I0122 05:24:17.126532 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" Jan 22 05:24:17 crc kubenswrapper[4814]: I0122 05:24:17.137755 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" Jan 22 05:24:17 crc kubenswrapper[4814]: I0122 05:24:17.152266 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-d69f788fd-42cqq" podStartSLOduration=4.152235543 podStartE2EDuration="4.152235543s" podCreationTimestamp="2026-01-22 05:24:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:24:17.150989344 +0000 UTC m=+343.234477599" watchObservedRunningTime="2026-01-22 05:24:17.152235543 +0000 UTC m=+343.235723788" Jan 22 05:24:17 crc kubenswrapper[4814]: I0122 05:24:17.551211 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:24:17 crc kubenswrapper[4814]: I0122 05:24:17.591792 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wz7gh" Jan 22 05:24:19 crc kubenswrapper[4814]: I0122 05:24:19.614724 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:24:19 crc kubenswrapper[4814]: I0122 05:24:19.615229 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:24:20 crc kubenswrapper[4814]: I0122 05:24:20.808811 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-gdnn2" Jan 22 05:24:20 crc kubenswrapper[4814]: I0122 05:24:20.890469 4814 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kt2c2"] Jan 22 05:24:33 crc kubenswrapper[4814]: I0122 05:24:33.538124 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5877696bb8-bbssz"] Jan 22 05:24:33 crc kubenswrapper[4814]: I0122 05:24:33.538860 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" podUID="a5eed773-3e19-43e5-b888-6defb74dcf9d" containerName="controller-manager" containerID="cri-o://fdff93da25b3cccea0dd49f4cb861334210eb664a1529ba6815cfef4b1eb2cff" gracePeriod=30 Jan 22 05:24:33 crc kubenswrapper[4814]: I0122 05:24:33.951447 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.078966 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5eed773-3e19-43e5-b888-6defb74dcf9d-serving-cert\") pod \"a5eed773-3e19-43e5-b888-6defb74dcf9d\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.079019 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-client-ca\") pod \"a5eed773-3e19-43e5-b888-6defb74dcf9d\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.079071 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-proxy-ca-bundles\") pod \"a5eed773-3e19-43e5-b888-6defb74dcf9d\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.079118 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttzxh\" (UniqueName: \"kubernetes.io/projected/a5eed773-3e19-43e5-b888-6defb74dcf9d-kube-api-access-ttzxh\") pod \"a5eed773-3e19-43e5-b888-6defb74dcf9d\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.079137 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-config\") pod \"a5eed773-3e19-43e5-b888-6defb74dcf9d\" (UID: \"a5eed773-3e19-43e5-b888-6defb74dcf9d\") " Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.080047 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-config" (OuterVolumeSpecName: "config") pod "a5eed773-3e19-43e5-b888-6defb74dcf9d" (UID: "a5eed773-3e19-43e5-b888-6defb74dcf9d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.081246 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "a5eed773-3e19-43e5-b888-6defb74dcf9d" (UID: "a5eed773-3e19-43e5-b888-6defb74dcf9d"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.081440 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-client-ca" (OuterVolumeSpecName: "client-ca") pod "a5eed773-3e19-43e5-b888-6defb74dcf9d" (UID: "a5eed773-3e19-43e5-b888-6defb74dcf9d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.085718 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5eed773-3e19-43e5-b888-6defb74dcf9d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a5eed773-3e19-43e5-b888-6defb74dcf9d" (UID: "a5eed773-3e19-43e5-b888-6defb74dcf9d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.098964 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5eed773-3e19-43e5-b888-6defb74dcf9d-kube-api-access-ttzxh" (OuterVolumeSpecName: "kube-api-access-ttzxh") pod "a5eed773-3e19-43e5-b888-6defb74dcf9d" (UID: "a5eed773-3e19-43e5-b888-6defb74dcf9d"). InnerVolumeSpecName "kube-api-access-ttzxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.180618 4814 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.180662 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttzxh\" (UniqueName: \"kubernetes.io/projected/a5eed773-3e19-43e5-b888-6defb74dcf9d-kube-api-access-ttzxh\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.180675 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.180684 4814 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a5eed773-3e19-43e5-b888-6defb74dcf9d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.180692 4814 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a5eed773-3e19-43e5-b888-6defb74dcf9d-client-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.226228 4814 generic.go:334] "Generic (PLEG): container finished" podID="a5eed773-3e19-43e5-b888-6defb74dcf9d" containerID="fdff93da25b3cccea0dd49f4cb861334210eb664a1529ba6815cfef4b1eb2cff" exitCode=0 Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.226278 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" event={"ID":"a5eed773-3e19-43e5-b888-6defb74dcf9d","Type":"ContainerDied","Data":"fdff93da25b3cccea0dd49f4cb861334210eb664a1529ba6815cfef4b1eb2cff"} Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.226303 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" 
event={"ID":"a5eed773-3e19-43e5-b888-6defb74dcf9d","Type":"ContainerDied","Data":"32bef85d9412b60bfdf9bad78277bf81f44be93f3dabd73b93e88d491e006f24"} Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.226321 4814 scope.go:117] "RemoveContainer" containerID="fdff93da25b3cccea0dd49f4cb861334210eb664a1529ba6815cfef4b1eb2cff" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.226408 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5877696bb8-bbssz" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.252394 4814 scope.go:117] "RemoveContainer" containerID="fdff93da25b3cccea0dd49f4cb861334210eb664a1529ba6815cfef4b1eb2cff" Jan 22 05:24:34 crc kubenswrapper[4814]: E0122 05:24:34.255594 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdff93da25b3cccea0dd49f4cb861334210eb664a1529ba6815cfef4b1eb2cff\": container with ID starting with fdff93da25b3cccea0dd49f4cb861334210eb664a1529ba6815cfef4b1eb2cff not found: ID does not exist" containerID="fdff93da25b3cccea0dd49f4cb861334210eb664a1529ba6815cfef4b1eb2cff" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.255661 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdff93da25b3cccea0dd49f4cb861334210eb664a1529ba6815cfef4b1eb2cff"} err="failed to get container status \"fdff93da25b3cccea0dd49f4cb861334210eb664a1529ba6815cfef4b1eb2cff\": rpc error: code = NotFound desc = could not find container \"fdff93da25b3cccea0dd49f4cb861334210eb664a1529ba6815cfef4b1eb2cff\": container with ID starting with fdff93da25b3cccea0dd49f4cb861334210eb664a1529ba6815cfef4b1eb2cff not found: ID does not exist" Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.262145 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5877696bb8-bbssz"] Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.268369 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5877696bb8-bbssz"] Jan 22 05:24:34 crc kubenswrapper[4814]: I0122 05:24:34.351570 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5eed773-3e19-43e5-b888-6defb74dcf9d" path="/var/lib/kubelet/pods/a5eed773-3e19-43e5-b888-6defb74dcf9d/volumes" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.502202 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5c9df99b5b-42bxr"] Jan 22 05:24:35 crc kubenswrapper[4814]: E0122 05:24:35.502487 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5eed773-3e19-43e5-b888-6defb74dcf9d" containerName="controller-manager" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.502505 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5eed773-3e19-43e5-b888-6defb74dcf9d" containerName="controller-manager" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.502688 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5eed773-3e19-43e5-b888-6defb74dcf9d" containerName="controller-manager" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.503250 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.505563 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.507272 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.507900 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.508179 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.508378 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.510704 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.521952 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.524371 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5c9df99b5b-42bxr"] Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.598592 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-proxy-ca-bundles\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.598910 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-serving-cert\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.599040 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-client-ca\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.599133 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgllp\" (UniqueName: \"kubernetes.io/projected/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-kube-api-access-jgllp\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.599229 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-config\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.700109 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-config\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.700186 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-proxy-ca-bundles\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.700239 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-serving-cert\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.700288 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-client-ca\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.700315 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgllp\" (UniqueName: \"kubernetes.io/projected/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-kube-api-access-jgllp\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.701342 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-client-ca\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.701619 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-proxy-ca-bundles\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.702214 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-config\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" 
Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.705864 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-serving-cert\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.723222 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgllp\" (UniqueName: \"kubernetes.io/projected/bb0f53c8-ddaf-4141-88ac-7b8851399bf5-kube-api-access-jgllp\") pod \"controller-manager-5c9df99b5b-42bxr\" (UID: \"bb0f53c8-ddaf-4141-88ac-7b8851399bf5\") " pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:35 crc kubenswrapper[4814]: I0122 05:24:35.820009 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:36 crc kubenswrapper[4814]: I0122 05:24:36.165520 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5c9df99b5b-42bxr"] Jan 22 05:24:36 crc kubenswrapper[4814]: W0122 05:24:36.173183 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb0f53c8_ddaf_4141_88ac_7b8851399bf5.slice/crio-7efe53a25c49eaa8476ab8f2b7009cfa9d2d8933524090da5911accbcc6556cc WatchSource:0}: Error finding container 7efe53a25c49eaa8476ab8f2b7009cfa9d2d8933524090da5911accbcc6556cc: Status 404 returned error can't find the container with id 7efe53a25c49eaa8476ab8f2b7009cfa9d2d8933524090da5911accbcc6556cc Jan 22 05:24:36 crc kubenswrapper[4814]: I0122 05:24:36.240417 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" event={"ID":"bb0f53c8-ddaf-4141-88ac-7b8851399bf5","Type":"ContainerStarted","Data":"7efe53a25c49eaa8476ab8f2b7009cfa9d2d8933524090da5911accbcc6556cc"} Jan 22 05:24:37 crc kubenswrapper[4814]: I0122 05:24:37.245750 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" event={"ID":"bb0f53c8-ddaf-4141-88ac-7b8851399bf5","Type":"ContainerStarted","Data":"f77fdb8b9b86823b8a0e1dc4deec732bb222fdc0a34231346f3c3670e0f5c13e"} Jan 22 05:24:37 crc kubenswrapper[4814]: I0122 05:24:37.246384 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:37 crc kubenswrapper[4814]: I0122 05:24:37.251098 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" Jan 22 05:24:37 crc kubenswrapper[4814]: I0122 05:24:37.267302 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5c9df99b5b-42bxr" podStartSLOduration=4.267280952 podStartE2EDuration="4.267280952s" podCreationTimestamp="2026-01-22 05:24:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:24:37.264360812 +0000 UTC m=+363.347849027" watchObservedRunningTime="2026-01-22 05:24:37.267280952 +0000 UTC m=+363.350769167" Jan 22 05:24:45 crc kubenswrapper[4814]: I0122 05:24:45.943095 4814 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" podUID="29aae90f-3db5-4e31-a13e-35049f8ff2de" containerName="registry" containerID="cri-o://3b7ffe7b7496385f394bc2049cf61899beef070aefbf52307b0f132997343822" gracePeriod=30 Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.301619 4814 generic.go:334] "Generic (PLEG): container finished" podID="29aae90f-3db5-4e31-a13e-35049f8ff2de" containerID="3b7ffe7b7496385f394bc2049cf61899beef070aefbf52307b0f132997343822" exitCode=0 Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.301670 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" event={"ID":"29aae90f-3db5-4e31-a13e-35049f8ff2de","Type":"ContainerDied","Data":"3b7ffe7b7496385f394bc2049cf61899beef070aefbf52307b0f132997343822"} Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.434635 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.576528 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-registry-tls\") pod \"29aae90f-3db5-4e31-a13e-35049f8ff2de\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.576718 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"29aae90f-3db5-4e31-a13e-35049f8ff2de\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.576748 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/29aae90f-3db5-4e31-a13e-35049f8ff2de-trusted-ca\") pod \"29aae90f-3db5-4e31-a13e-35049f8ff2de\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.576771 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/29aae90f-3db5-4e31-a13e-35049f8ff2de-ca-trust-extracted\") pod \"29aae90f-3db5-4e31-a13e-35049f8ff2de\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.576788 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/29aae90f-3db5-4e31-a13e-35049f8ff2de-registry-certificates\") pod \"29aae90f-3db5-4e31-a13e-35049f8ff2de\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.577458 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29aae90f-3db5-4e31-a13e-35049f8ff2de-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "29aae90f-3db5-4e31-a13e-35049f8ff2de" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.577515 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29aae90f-3db5-4e31-a13e-35049f8ff2de-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "29aae90f-3db5-4e31-a13e-35049f8ff2de" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.577703 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqqnb\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-kube-api-access-pqqnb\") pod \"29aae90f-3db5-4e31-a13e-35049f8ff2de\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.577810 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/29aae90f-3db5-4e31-a13e-35049f8ff2de-installation-pull-secrets\") pod \"29aae90f-3db5-4e31-a13e-35049f8ff2de\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.577868 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-bound-sa-token\") pod \"29aae90f-3db5-4e31-a13e-35049f8ff2de\" (UID: \"29aae90f-3db5-4e31-a13e-35049f8ff2de\") " Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.578389 4814 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/29aae90f-3db5-4e31-a13e-35049f8ff2de-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.578406 4814 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/29aae90f-3db5-4e31-a13e-35049f8ff2de-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.589255 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-kube-api-access-pqqnb" (OuterVolumeSpecName: "kube-api-access-pqqnb") pod "29aae90f-3db5-4e31-a13e-35049f8ff2de" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de"). InnerVolumeSpecName "kube-api-access-pqqnb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.589343 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29aae90f-3db5-4e31-a13e-35049f8ff2de-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "29aae90f-3db5-4e31-a13e-35049f8ff2de" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.589589 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "29aae90f-3db5-4e31-a13e-35049f8ff2de" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.594614 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "29aae90f-3db5-4e31-a13e-35049f8ff2de" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.599278 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "29aae90f-3db5-4e31-a13e-35049f8ff2de" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.604228 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29aae90f-3db5-4e31-a13e-35049f8ff2de-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "29aae90f-3db5-4e31-a13e-35049f8ff2de" (UID: "29aae90f-3db5-4e31-a13e-35049f8ff2de"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.679772 4814 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/29aae90f-3db5-4e31-a13e-35049f8ff2de-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.679805 4814 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.679816 4814 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.679826 4814 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/29aae90f-3db5-4e31-a13e-35049f8ff2de-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:46 crc kubenswrapper[4814]: I0122 05:24:46.679836 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqqnb\" (UniqueName: \"kubernetes.io/projected/29aae90f-3db5-4e31-a13e-35049f8ff2de-kube-api-access-pqqnb\") on node \"crc\" DevicePath \"\"" Jan 22 05:24:47 crc kubenswrapper[4814]: I0122 05:24:47.310710 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" event={"ID":"29aae90f-3db5-4e31-a13e-35049f8ff2de","Type":"ContainerDied","Data":"332f5aeed2d509187bcae51bef3684639d6522adf6fc56adedca1b486a77b1a9"} Jan 22 05:24:47 crc kubenswrapper[4814]: I0122 05:24:47.310971 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kt2c2" Jan 22 05:24:47 crc kubenswrapper[4814]: I0122 05:24:47.312222 4814 scope.go:117] "RemoveContainer" containerID="3b7ffe7b7496385f394bc2049cf61899beef070aefbf52307b0f132997343822" Jan 22 05:24:47 crc kubenswrapper[4814]: I0122 05:24:47.363151 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kt2c2"] Jan 22 05:24:47 crc kubenswrapper[4814]: I0122 05:24:47.385173 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kt2c2"] Jan 22 05:24:48 crc kubenswrapper[4814]: I0122 05:24:48.352060 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29aae90f-3db5-4e31-a13e-35049f8ff2de" path="/var/lib/kubelet/pods/29aae90f-3db5-4e31-a13e-35049f8ff2de/volumes" Jan 22 05:24:49 crc kubenswrapper[4814]: I0122 05:24:49.613789 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:24:49 crc kubenswrapper[4814]: I0122 05:24:49.613924 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:25:19 crc kubenswrapper[4814]: I0122 05:25:19.614237 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:25:19 crc kubenswrapper[4814]: I0122 05:25:19.614869 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:25:19 crc kubenswrapper[4814]: I0122 05:25:19.614937 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:25:19 crc kubenswrapper[4814]: I0122 05:25:19.615777 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f8c5987ebbafcee5f7525abf6e4789b335512d5dc41a68223adfc3fcea787722"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 05:25:19 crc kubenswrapper[4814]: I0122 05:25:19.615901 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://f8c5987ebbafcee5f7525abf6e4789b335512d5dc41a68223adfc3fcea787722" gracePeriod=600 Jan 22 05:25:20 crc kubenswrapper[4814]: I0122 05:25:20.547390 4814 generic.go:334] "Generic (PLEG): container finished" 
podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="f8c5987ebbafcee5f7525abf6e4789b335512d5dc41a68223adfc3fcea787722" exitCode=0 Jan 22 05:25:20 crc kubenswrapper[4814]: I0122 05:25:20.547474 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"f8c5987ebbafcee5f7525abf6e4789b335512d5dc41a68223adfc3fcea787722"} Jan 22 05:25:20 crc kubenswrapper[4814]: I0122 05:25:20.548858 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"9f2f025680c6fd4e90a0353a156dc3eb3a96411365552c383a9346ea5768b5f9"} Jan 22 05:25:20 crc kubenswrapper[4814]: I0122 05:25:20.548911 4814 scope.go:117] "RemoveContainer" containerID="979c5db6843312fa7a473e2110545392708ce2e176fb9f64ebf223b017a53711" Jan 22 05:27:19 crc kubenswrapper[4814]: I0122 05:27:19.615592 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:27:19 crc kubenswrapper[4814]: I0122 05:27:19.616965 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:27:49 crc kubenswrapper[4814]: I0122 05:27:49.614433 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:27:49 crc kubenswrapper[4814]: I0122 05:27:49.615051 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.858322 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-k6jjk"] Jan 22 05:28:18 crc kubenswrapper[4814]: E0122 05:28:18.859048 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29aae90f-3db5-4e31-a13e-35049f8ff2de" containerName="registry" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.859063 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="29aae90f-3db5-4e31-a13e-35049f8ff2de" containerName="registry" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.859197 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="29aae90f-3db5-4e31-a13e-35049f8ff2de" containerName="registry" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.859665 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-k6jjk" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.861108 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-877gl"] Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.861771 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-877gl" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.864787 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.864987 4814 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-zvm4c" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.865183 4814 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-dkm5n" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.865354 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.884534 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-877gl"] Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.886230 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-k6jjk"] Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.905490 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-4666l"] Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.906335 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-4666l" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.911876 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swgf2\" (UniqueName: \"kubernetes.io/projected/8fc0a0f9-dc10-4fd4-b0ab-c2ba6f4f67e0-kube-api-access-swgf2\") pod \"cert-manager-858654f9db-877gl\" (UID: \"8fc0a0f9-dc10-4fd4-b0ab-c2ba6f4f67e0\") " pod="cert-manager/cert-manager-858654f9db-877gl" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.911906 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmm4d\" (UniqueName: \"kubernetes.io/projected/ca1308a2-44df-461d-8817-0e098470dc68-kube-api-access-mmm4d\") pod \"cert-manager-webhook-687f57d79b-4666l\" (UID: \"ca1308a2-44df-461d-8817-0e098470dc68\") " pod="cert-manager/cert-manager-webhook-687f57d79b-4666l" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.911975 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqbfc\" (UniqueName: \"kubernetes.io/projected/c9f82827-f6e6-477b-9afd-4649e5413ec4-kube-api-access-cqbfc\") pod \"cert-manager-cainjector-cf98fcc89-k6jjk\" (UID: \"c9f82827-f6e6-477b-9afd-4649e5413ec4\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-k6jjk" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.918803 4814 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-x8j8c" Jan 22 05:28:18 crc kubenswrapper[4814]: I0122 05:28:18.922426 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-4666l"] Jan 22 05:28:19 crc 
kubenswrapper[4814]: I0122 05:28:19.013538 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqbfc\" (UniqueName: \"kubernetes.io/projected/c9f82827-f6e6-477b-9afd-4649e5413ec4-kube-api-access-cqbfc\") pod \"cert-manager-cainjector-cf98fcc89-k6jjk\" (UID: \"c9f82827-f6e6-477b-9afd-4649e5413ec4\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-k6jjk"
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.013609 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swgf2\" (UniqueName: \"kubernetes.io/projected/8fc0a0f9-dc10-4fd4-b0ab-c2ba6f4f67e0-kube-api-access-swgf2\") pod \"cert-manager-858654f9db-877gl\" (UID: \"8fc0a0f9-dc10-4fd4-b0ab-c2ba6f4f67e0\") " pod="cert-manager/cert-manager-858654f9db-877gl"
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.013655 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmm4d\" (UniqueName: \"kubernetes.io/projected/ca1308a2-44df-461d-8817-0e098470dc68-kube-api-access-mmm4d\") pod \"cert-manager-webhook-687f57d79b-4666l\" (UID: \"ca1308a2-44df-461d-8817-0e098470dc68\") " pod="cert-manager/cert-manager-webhook-687f57d79b-4666l"
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.030464 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmm4d\" (UniqueName: \"kubernetes.io/projected/ca1308a2-44df-461d-8817-0e098470dc68-kube-api-access-mmm4d\") pod \"cert-manager-webhook-687f57d79b-4666l\" (UID: \"ca1308a2-44df-461d-8817-0e098470dc68\") " pod="cert-manager/cert-manager-webhook-687f57d79b-4666l"
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.030691 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swgf2\" (UniqueName: \"kubernetes.io/projected/8fc0a0f9-dc10-4fd4-b0ab-c2ba6f4f67e0-kube-api-access-swgf2\") pod \"cert-manager-858654f9db-877gl\" (UID: \"8fc0a0f9-dc10-4fd4-b0ab-c2ba6f4f67e0\") " pod="cert-manager/cert-manager-858654f9db-877gl"
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.031937 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqbfc\" (UniqueName: \"kubernetes.io/projected/c9f82827-f6e6-477b-9afd-4649e5413ec4-kube-api-access-cqbfc\") pod \"cert-manager-cainjector-cf98fcc89-k6jjk\" (UID: \"c9f82827-f6e6-477b-9afd-4649e5413ec4\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-k6jjk"
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.179120 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-k6jjk"
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.188348 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-877gl"
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.217979 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-4666l"
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.450694 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-877gl"]
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.460345 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.478248 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-4666l"]
Jan 22 05:28:19 crc kubenswrapper[4814]: W0122 05:28:19.483378 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca1308a2_44df_461d_8817_0e098470dc68.slice/crio-4f2b5b7843dcbc20f7945cf03d661c0d5075050c2aa83388a247bd577ade2dae WatchSource:0}: Error finding container 4f2b5b7843dcbc20f7945cf03d661c0d5075050c2aa83388a247bd577ade2dae: Status 404 returned error can't find the container with id 4f2b5b7843dcbc20f7945cf03d661c0d5075050c2aa83388a247bd577ade2dae
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.586931 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-k6jjk"]
Jan 22 05:28:19 crc kubenswrapper[4814]: W0122 05:28:19.589411 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9f82827_f6e6_477b_9afd_4649e5413ec4.slice/crio-239def64d0f8e20161207a22ceded7bd3ce69e4a0832235977a0f5587b9517ef WatchSource:0}: Error finding container 239def64d0f8e20161207a22ceded7bd3ce69e4a0832235977a0f5587b9517ef: Status 404 returned error can't find the container with id 239def64d0f8e20161207a22ceded7bd3ce69e4a0832235977a0f5587b9517ef
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.614220 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.614265 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.614301 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg"
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.614826 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9f2f025680c6fd4e90a0353a156dc3eb3a96411365552c383a9346ea5768b5f9"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.614882 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://9f2f025680c6fd4e90a0353a156dc3eb3a96411365552c383a9346ea5768b5f9" gracePeriod=600
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.821047 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-k6jjk" event={"ID":"c9f82827-f6e6-477b-9afd-4649e5413ec4","Type":"ContainerStarted","Data":"239def64d0f8e20161207a22ceded7bd3ce69e4a0832235977a0f5587b9517ef"}
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.822555 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-4666l" event={"ID":"ca1308a2-44df-461d-8817-0e098470dc68","Type":"ContainerStarted","Data":"4f2b5b7843dcbc20f7945cf03d661c0d5075050c2aa83388a247bd577ade2dae"}
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.823598 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-877gl" event={"ID":"8fc0a0f9-dc10-4fd4-b0ab-c2ba6f4f67e0","Type":"ContainerStarted","Data":"ac7cec10d002bc44e3bff867ff0dbd34484785544f5bd6b22b8e13962a5559f9"}
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.825329 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="9f2f025680c6fd4e90a0353a156dc3eb3a96411365552c383a9346ea5768b5f9" exitCode=0
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.825384 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"9f2f025680c6fd4e90a0353a156dc3eb3a96411365552c383a9346ea5768b5f9"}
Jan 22 05:28:19 crc kubenswrapper[4814]: I0122 05:28:19.825423 4814 scope.go:117] "RemoveContainer" containerID="f8c5987ebbafcee5f7525abf6e4789b335512d5dc41a68223adfc3fcea787722"
Jan 22 05:28:20 crc kubenswrapper[4814]: I0122 05:28:20.835369 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"b60d5a55f7f3e7c7e151368bd532eb06ab5f80edff26a6360b765f6b4951f49e"}
Jan 22 05:28:22 crc kubenswrapper[4814]: I0122 05:28:22.858332 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-4666l" event={"ID":"ca1308a2-44df-461d-8817-0e098470dc68","Type":"ContainerStarted","Data":"05450bb53f6b3cec9532bba2077d94a732407dcaf3cf51c3ea6759c45fc2d2f5"}
Jan 22 05:28:22 crc kubenswrapper[4814]: I0122 05:28:22.858864 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-4666l"
Jan 22 05:28:22 crc kubenswrapper[4814]: I0122 05:28:22.886576 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-4666l" podStartSLOduration=2.305393297 podStartE2EDuration="4.886513583s" podCreationTimestamp="2026-01-22 05:28:18 +0000 UTC" firstStartedPulling="2026-01-22 05:28:19.488322251 +0000 UTC m=+585.571810466" lastFinishedPulling="2026-01-22 05:28:22.069442537 +0000 UTC m=+588.152930752" observedRunningTime="2026-01-22 05:28:22.875378479 +0000 UTC m=+588.958866704" watchObservedRunningTime="2026-01-22 05:28:22.886513583 +0000 UTC m=+588.970001838"
Jan 22 05:28:23 crc kubenswrapper[4814]: I0122 05:28:23.863703 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-k6jjk" event={"ID":"c9f82827-f6e6-477b-9afd-4649e5413ec4","Type":"ContainerStarted","Data":"96b412ab5906c9554d6327ca50c7fde0d4e8820a8532690d46e9cab0f514a7e6"}
Jan 22 05:28:23 crc kubenswrapper[4814]: I0122 05:28:23.864994 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-877gl" event={"ID":"8fc0a0f9-dc10-4fd4-b0ab-c2ba6f4f67e0","Type":"ContainerStarted","Data":"2d302cf3dc9d78736d4028c6b514d74fdbb8c7d0ebc5f1e4a3655f941f5a0284"}
Jan 22 05:28:23 crc kubenswrapper[4814]: I0122 05:28:23.877804 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-k6jjk" podStartSLOduration=2.818321982 podStartE2EDuration="5.877784466s" podCreationTimestamp="2026-01-22 05:28:18 +0000 UTC" firstStartedPulling="2026-01-22 05:28:19.590664849 +0000 UTC m=+585.674153064" lastFinishedPulling="2026-01-22 05:28:22.650127333 +0000 UTC m=+588.733615548" observedRunningTime="2026-01-22 05:28:23.876063152 +0000 UTC m=+589.959551377" watchObservedRunningTime="2026-01-22 05:28:23.877784466 +0000 UTC m=+589.961272671"
Jan 22 05:28:23 crc kubenswrapper[4814]: I0122 05:28:23.889914 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-877gl" podStartSLOduration=1.8186291639999999 podStartE2EDuration="5.889897251s" podCreationTimestamp="2026-01-22 05:28:18 +0000 UTC" firstStartedPulling="2026-01-22 05:28:19.460173739 +0000 UTC m=+585.543661954" lastFinishedPulling="2026-01-22 05:28:23.531441836 +0000 UTC m=+589.614930041" observedRunningTime="2026-01-22 05:28:23.888941451 +0000 UTC m=+589.972429676" watchObservedRunningTime="2026-01-22 05:28:23.889897251 +0000 UTC m=+589.973385466"
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.574558 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wvzgj"]
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.575930 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovn-controller" containerID="cri-o://8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9" gracePeriod=30
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.575943 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="nbdb" containerID="cri-o://b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696" gracePeriod=30
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.576135 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="northd" containerID="cri-o://08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a" gracePeriod=30
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.576212 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5" gracePeriod=30
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.576277 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="kube-rbac-proxy-node" containerID="cri-o://c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3" gracePeriod=30
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.576337 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovn-acl-logging" containerID="cri-o://8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c" gracePeriod=30
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.576589 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="sbdb" containerID="cri-o://78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200" gracePeriod=30
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.681232 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller" containerID="cri-o://74c22517da4f736a98526fca6fa3436f7c2cba2f848f165c31d69f178637895f" gracePeriod=30
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.898946 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rq55l_22017d22-7b4d-4e3d-bbae-ff564c64bd7b/kube-multus/2.log"
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.899669 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rq55l_22017d22-7b4d-4e3d-bbae-ff564c64bd7b/kube-multus/1.log"
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.899714 4814 generic.go:334] "Generic (PLEG): container finished" podID="22017d22-7b4d-4e3d-bbae-ff564c64bd7b" containerID="dea1d487fb592deca0be2c7b5b5a107858c92384301dc9ef3976e3456777ab8e" exitCode=2
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.899775 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rq55l" event={"ID":"22017d22-7b4d-4e3d-bbae-ff564c64bd7b","Type":"ContainerDied","Data":"dea1d487fb592deca0be2c7b5b5a107858c92384301dc9ef3976e3456777ab8e"}
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.899806 4814 scope.go:117] "RemoveContainer" containerID="f22998162f0ec0e1506bc8201a3ed88f8dc47ae492f47e09ef1ce1ecaf1ed181"
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.900963 4814 scope.go:117] "RemoveContainer" containerID="dea1d487fb592deca0be2c7b5b5a107858c92384301dc9ef3976e3456777ab8e"
Jan 22 05:28:28 crc kubenswrapper[4814]: E0122 05:28:28.901212 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-rq55l_openshift-multus(22017d22-7b4d-4e3d-bbae-ff564c64bd7b)\"" pod="openshift-multus/multus-rq55l" podUID="22017d22-7b4d-4e3d-bbae-ff564c64bd7b"
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.901660 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovnkube-controller/3.log"
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.906835 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovn-acl-logging/0.log"
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907251 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovn-controller/0.log"
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907578 4814 generic.go:334] "Generic (PLEG): container finished" podID="55649399-9fd6-4e9a-b249-ce01b498c626" containerID="74c22517da4f736a98526fca6fa3436f7c2cba2f848f165c31d69f178637895f" exitCode=0
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907600 4814 generic.go:334] "Generic (PLEG): container finished" podID="55649399-9fd6-4e9a-b249-ce01b498c626" containerID="78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200" exitCode=0
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907608 4814 generic.go:334] "Generic (PLEG): container finished" podID="55649399-9fd6-4e9a-b249-ce01b498c626" containerID="b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696" exitCode=0
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907615 4814 generic.go:334] "Generic (PLEG): container finished" podID="55649399-9fd6-4e9a-b249-ce01b498c626" containerID="08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a" exitCode=0
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907634 4814 generic.go:334] "Generic (PLEG): container finished" podID="55649399-9fd6-4e9a-b249-ce01b498c626" containerID="341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5" exitCode=0
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907641 4814 generic.go:334] "Generic (PLEG): container finished" podID="55649399-9fd6-4e9a-b249-ce01b498c626" containerID="c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3" exitCode=0
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907648 4814 generic.go:334] "Generic (PLEG): container finished" podID="55649399-9fd6-4e9a-b249-ce01b498c626" containerID="8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c" exitCode=143
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907654 4814 generic.go:334] "Generic (PLEG): container finished" podID="55649399-9fd6-4e9a-b249-ce01b498c626" containerID="8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9" exitCode=143
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907673 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"74c22517da4f736a98526fca6fa3436f7c2cba2f848f165c31d69f178637895f"}
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907695 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200"}
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907704 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696"}
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907713 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a"}
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907724 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5"}
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907733 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3"}
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907742 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c"}
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.907752 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9"}
Jan 22 05:28:28 crc kubenswrapper[4814]: I0122 05:28:28.938028 4814 scope.go:117] "RemoveContainer" containerID="4f44817b252b8597c748b3dacbc3a6a35d29af4272aee1199dd8441a9ac8c2eb"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.221847 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-4666l"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.403686 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovn-acl-logging/0.log"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.404981 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovn-controller/0.log"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.405827 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.465882 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-run-ovn-kubernetes\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.465940 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-var-lib-openvswitch\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.465976 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-cni-netd\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466004 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-systemd\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466044 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-openvswitch\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466072 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466104 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/55649399-9fd6-4e9a-b249-ce01b498c626-ovn-node-metrics-cert\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466193 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466185 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466288 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466290 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466235 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-var-lib-cni-networks-ovn-kubernetes\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466413 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-env-overrides\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466485 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-ovnkube-config\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466543 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-ovn\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466582 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-slash\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466686 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-cni-bin\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466727 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-systemd-units\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466772 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5scm\" (UniqueName: \"kubernetes.io/projected/55649399-9fd6-4e9a-b249-ce01b498c626-kube-api-access-q5scm\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466815 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-node-log\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466868 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-run-netns\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466920 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-ovnkube-script-lib\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.466958 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-log-socket\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467041 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-etc-openvswitch\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467085 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-kubelet\") pod \"55649399-9fd6-4e9a-b249-ce01b498c626\" (UID: \"55649399-9fd6-4e9a-b249-ce01b498c626\") "
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467305 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467322 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467381 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-log-socket" (OuterVolumeSpecName: "log-socket") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467381 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-node-log" (OuterVolumeSpecName: "node-log") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467420 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467428 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467488 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-slash" (OuterVolumeSpecName: "host-slash") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467508 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467565 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467585 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467677 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467944 4814 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-cni-netd\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467982 4814 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-openvswitch\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468008 4814 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468035 4814 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-env-overrides\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468061 4814 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-ovnkube-config\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468086 4814 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468107 4814 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-slash\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468129 4814 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-cni-bin\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468152 4814 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-systemd-units\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468174 4814 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-node-log\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468218 4814 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-run-netns\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468243 4814 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-log-socket\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468267 4814 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-etc-openvswitch\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468289 4814 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-kubelet\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468311 4814 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.468339 4814 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-var-lib-openvswitch\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.467945 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.476059 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55649399-9fd6-4e9a-b249-ce01b498c626-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.478994 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55649399-9fd6-4e9a-b249-ce01b498c626-kube-api-access-q5scm" (OuterVolumeSpecName: "kube-api-access-q5scm") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "kube-api-access-q5scm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516111 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-sxtpj"]
Jan 22 05:28:29 crc kubenswrapper[4814]: E0122 05:28:29.516383 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="kube-rbac-proxy-ovn-metrics"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516404 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="kube-rbac-proxy-ovn-metrics"
Jan 22 05:28:29 crc kubenswrapper[4814]: E0122 05:28:29.516421 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516431 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: E0122 05:28:29.516440 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="kube-rbac-proxy-node"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516448 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="kube-rbac-proxy-node"
Jan 22 05:28:29 crc kubenswrapper[4814]: E0122 05:28:29.516462 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="kubecfg-setup"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516473 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="kubecfg-setup"
Jan 22 05:28:29 crc kubenswrapper[4814]: E0122 05:28:29.516491 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovn-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516503 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovn-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: E0122 05:28:29.516523 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="northd"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516531 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="northd"
Jan 22 05:28:29 crc kubenswrapper[4814]: E0122 05:28:29.516541 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516550 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: E0122 05:28:29.516561 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="nbdb"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516569 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="nbdb"
Jan 22 05:28:29 crc kubenswrapper[4814]: E0122 05:28:29.516583 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516591 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: E0122 05:28:29.516604 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516613 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: E0122 05:28:29.516644 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="sbdb"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516652 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="sbdb"
Jan 22 05:28:29 crc kubenswrapper[4814]: E0122 05:28:29.516659 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516667 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: E0122 05:28:29.516681 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovn-acl-logging"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516689 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovn-acl-logging"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516815 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovn-acl-logging"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516831 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516840 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovn-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516851 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="kube-rbac-proxy-ovn-metrics"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516864 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516872 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="kube-rbac-proxy-node"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516885 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="sbdb"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516895 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516903 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="nbdb"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516914 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="northd"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.516925 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.517238 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" containerName="ovnkube-controller"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.519203 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "55649399-9fd6-4e9a-b249-ce01b498c626" (UID: "55649399-9fd6-4e9a-b249-ce01b498c626"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.520378 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569341 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-ovn-node-metrics-cert\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569423 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-cni-netd\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569455 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-run-ovn-kubernetes\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569502 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-run-openvswitch\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569528 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-ovnkube-config\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569553 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-kubelet\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569649 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-run-systemd\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569670 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-slash\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569699 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-var-lib-openvswitch\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569739 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-run-ovn\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569768 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-systemd-units\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569811 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-log-socket\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569836 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-node-log\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569864 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569905 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgb94\" (UniqueName: \"kubernetes.io/projected/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-kube-api-access-cgb94\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569929 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-etc-openvswitch\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569974 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-env-overrides\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.569996 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-ovnkube-script-lib\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.570015 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-run-netns\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.570059 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-cni-bin\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.570127 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5scm\" (UniqueName: \"kubernetes.io/projected/55649399-9fd6-4e9a-b249-ce01b498c626-kube-api-access-q5scm\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.570142 4814 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/55649399-9fd6-4e9a-b249-ce01b498c626-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.570153 4814 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/55649399-9fd6-4e9a-b249-ce01b498c626-run-systemd\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.570164 4814 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/55649399-9fd6-4e9a-b249-ce01b498c626-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671344 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-env-overrides\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671414 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-ovnkube-script-lib\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671455 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-run-netns\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671485 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-cni-bin\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671532 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-cni-netd\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671553 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-ovn-node-metrics-cert\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671578 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-run-ovn-kubernetes\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671699 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-run-openvswitch\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671728 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-ovnkube-config\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671752 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-kubelet\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671800 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-slash\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671822 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-run-systemd\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671867 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-var-lib-openvswitch\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671896 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-run-ovn\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671945 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-systemd-units\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671967 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-log-socket\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.671993 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-node-log\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.672039 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.672063 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgb94\" (UniqueName: \"kubernetes.io/projected/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-kube-api-access-cgb94\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.672104 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-etc-openvswitch\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.672196 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-etc-openvswitch\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.672806 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-kubelet\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.672876 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-env-overrides\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.672923 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-slash\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.672952 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-run-systemd\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.672981 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-var-lib-openvswitch\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.673020 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-run-ovn\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj"
Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.673046 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-systemd-units\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") "
pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.673071 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-log-socket\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.673116 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-node-log\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.673144 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.673565 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-ovnkube-script-lib\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.673646 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-run-ovn-kubernetes\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.673680 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-run-openvswitch\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.674019 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-cni-bin\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.674053 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-cni-netd\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.674143 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-host-run-netns\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.674231 
4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-ovnkube-config\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.678443 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-ovn-node-metrics-cert\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.701434 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgb94\" (UniqueName: \"kubernetes.io/projected/eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885-kube-api-access-cgb94\") pod \"ovnkube-node-sxtpj\" (UID: \"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.842557 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:29 crc kubenswrapper[4814]: W0122 05:28:29.878479 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeab1f3a1_b6f0_4c60_8f48_bf7cb0eaf885.slice/crio-2f91b28ce52c293602b6ac1a95cfee92696163b5b37dd6283b4db5fbeefa6602 WatchSource:0}: Error finding container 2f91b28ce52c293602b6ac1a95cfee92696163b5b37dd6283b4db5fbeefa6602: Status 404 returned error can't find the container with id 2f91b28ce52c293602b6ac1a95cfee92696163b5b37dd6283b4db5fbeefa6602 Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.920553 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rq55l_22017d22-7b4d-4e3d-bbae-ff564c64bd7b/kube-multus/2.log" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.942329 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovn-acl-logging/0.log" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.947159 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-wvzgj_55649399-9fd6-4e9a-b249-ce01b498c626/ovn-controller/0.log" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.959144 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" event={"ID":"55649399-9fd6-4e9a-b249-ce01b498c626","Type":"ContainerDied","Data":"4f190f7eddd52e6ead3de482c65ce4708567155b7b8ceeaa98c96c33646a2ef4"} Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.959235 4814 scope.go:117] "RemoveContainer" containerID="74c22517da4f736a98526fca6fa3436f7c2cba2f848f165c31d69f178637895f" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.959602 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wvzgj" Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.968815 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" event={"ID":"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885","Type":"ContainerStarted","Data":"2f91b28ce52c293602b6ac1a95cfee92696163b5b37dd6283b4db5fbeefa6602"} Jan 22 05:28:29 crc kubenswrapper[4814]: I0122 05:28:29.994917 4814 scope.go:117] "RemoveContainer" containerID="78e83836df287f8ca620ed3b1d847f2686762761aee5dadab0ac017f037bf200" Jan 22 05:28:30 crc kubenswrapper[4814]: I0122 05:28:30.005797 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wvzgj"] Jan 22 05:28:30 crc kubenswrapper[4814]: I0122 05:28:30.014002 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wvzgj"] Jan 22 05:28:30 crc kubenswrapper[4814]: I0122 05:28:30.019649 4814 scope.go:117] "RemoveContainer" containerID="b3be6522723b1715d0dd66ff02e4e2031e14a4b45bb9a41da8bcc813026dd696" Jan 22 05:28:30 crc kubenswrapper[4814]: I0122 05:28:30.040498 4814 scope.go:117] "RemoveContainer" containerID="08e8638e7fa6626e6d9b74c475e7f8d24ff44f708515a04b569417c4dd03b48a" Jan 22 05:28:30 crc kubenswrapper[4814]: I0122 05:28:30.057205 4814 scope.go:117] "RemoveContainer" containerID="341845024e28f825ccf6a8417b0a9782a714e502d897feb9459427f45e0326c5" Jan 22 05:28:30 crc kubenswrapper[4814]: I0122 05:28:30.074495 4814 scope.go:117] "RemoveContainer" containerID="c99e1a339f9356289314975888a50af475621c427f826d4f6e10958178a9b4d3" Jan 22 05:28:30 crc kubenswrapper[4814]: I0122 05:28:30.128620 4814 scope.go:117] "RemoveContainer" containerID="8939bac6d52d870211e829dbb9e427a7eb5ffe54111f6077456dffef77e6ed9c" Jan 22 05:28:30 crc kubenswrapper[4814]: I0122 05:28:30.147039 4814 scope.go:117] "RemoveContainer" containerID="8387d73fdb9f439909562316514c66b93ef9f7facd7aeffce48f0049308d23f9" Jan 22 05:28:30 crc kubenswrapper[4814]: I0122 05:28:30.162351 4814 scope.go:117] "RemoveContainer" containerID="ae89e31f6b2d4a56db5d04fddf52762e246f94d5a86e999fd632f61c463d060d" Jan 22 05:28:30 crc kubenswrapper[4814]: I0122 05:28:30.356477 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55649399-9fd6-4e9a-b249-ce01b498c626" path="/var/lib/kubelet/pods/55649399-9fd6-4e9a-b249-ce01b498c626/volumes" Jan 22 05:28:30 crc kubenswrapper[4814]: I0122 05:28:30.977746 4814 generic.go:334] "Generic (PLEG): container finished" podID="eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885" containerID="ea7ca0fcc7970405a96c0bd582d37e99551dbc8de224b046a3ada9e71184128f" exitCode=0 Jan 22 05:28:30 crc kubenswrapper[4814]: I0122 05:28:30.977882 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" event={"ID":"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885","Type":"ContainerDied","Data":"ea7ca0fcc7970405a96c0bd582d37e99551dbc8de224b046a3ada9e71184128f"} Jan 22 05:28:31 crc kubenswrapper[4814]: I0122 05:28:31.993304 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" event={"ID":"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885","Type":"ContainerStarted","Data":"930d12e43fad3d0d2b4f6af244df5cd9231af6ac4424cc8f00051f7851fc9e2d"} Jan 22 05:28:31 crc kubenswrapper[4814]: I0122 05:28:31.993645 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" 
event={"ID":"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885","Type":"ContainerStarted","Data":"36c6d73fb47b5b499dabf39ecc1e83a452b764a87cb730385f0c700babf92e93"} Jan 22 05:28:31 crc kubenswrapper[4814]: I0122 05:28:31.993662 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" event={"ID":"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885","Type":"ContainerStarted","Data":"f8cfa50ce25ca74dbfc3eb87a59088e3a2e0d8269a481358be2d74b7508448e9"} Jan 22 05:28:31 crc kubenswrapper[4814]: I0122 05:28:31.993673 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" event={"ID":"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885","Type":"ContainerStarted","Data":"354812d1d150402ab7ae5fd222dad02e3c255b2bc288356337e8fa0c03e1a8e3"} Jan 22 05:28:31 crc kubenswrapper[4814]: I0122 05:28:31.993685 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" event={"ID":"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885","Type":"ContainerStarted","Data":"28c0b86917d9036a60e191ad68d0ac0b26122ee8855241afa888a482030f5802"} Jan 22 05:28:31 crc kubenswrapper[4814]: I0122 05:28:31.993697 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" event={"ID":"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885","Type":"ContainerStarted","Data":"a5d2eab47fb1630a56d7720a2b059bfe79d344584ac9cb018d06c301a4c1b06e"} Jan 22 05:28:35 crc kubenswrapper[4814]: I0122 05:28:35.023184 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" event={"ID":"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885","Type":"ContainerStarted","Data":"f8406de3c0e3bc041aec2870d2b78ac5cf082b411b363537bd27cf750d7f7b44"} Jan 22 05:28:37 crc kubenswrapper[4814]: I0122 05:28:37.457836 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" event={"ID":"eab1f3a1-b6f0-4c60-8f48-bf7cb0eaf885","Type":"ContainerStarted","Data":"bca174bfb9307938f24eeb13e0695e1604ef5c63a2f6ebb6099170f12d7244b5"} Jan 22 05:28:37 crc kubenswrapper[4814]: I0122 05:28:37.459089 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:37 crc kubenswrapper[4814]: I0122 05:28:37.459117 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:37 crc kubenswrapper[4814]: I0122 05:28:37.533751 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" podStartSLOduration=8.533735245999999 podStartE2EDuration="8.533735246s" podCreationTimestamp="2026-01-22 05:28:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:28:37.531584719 +0000 UTC m=+603.615072964" watchObservedRunningTime="2026-01-22 05:28:37.533735246 +0000 UTC m=+603.617223461" Jan 22 05:28:37 crc kubenswrapper[4814]: I0122 05:28:37.543573 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:38 crc kubenswrapper[4814]: I0122 05:28:38.465425 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:38 crc kubenswrapper[4814]: I0122 05:28:38.497949 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:28:44 crc kubenswrapper[4814]: I0122 05:28:44.349762 4814 scope.go:117] "RemoveContainer" containerID="dea1d487fb592deca0be2c7b5b5a107858c92384301dc9ef3976e3456777ab8e" Jan 22 05:28:44 crc kubenswrapper[4814]: E0122 05:28:44.351016 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-rq55l_openshift-multus(22017d22-7b4d-4e3d-bbae-ff564c64bd7b)\"" pod="openshift-multus/multus-rq55l" podUID="22017d22-7b4d-4e3d-bbae-ff564c64bd7b" Jan 22 05:28:57 crc kubenswrapper[4814]: I0122 05:28:57.343914 4814 scope.go:117] "RemoveContainer" containerID="dea1d487fb592deca0be2c7b5b5a107858c92384301dc9ef3976e3456777ab8e" Jan 22 05:28:57 crc kubenswrapper[4814]: I0122 05:28:57.590504 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rq55l_22017d22-7b4d-4e3d-bbae-ff564c64bd7b/kube-multus/2.log" Jan 22 05:28:57 crc kubenswrapper[4814]: I0122 05:28:57.590957 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rq55l" event={"ID":"22017d22-7b4d-4e3d-bbae-ff564c64bd7b","Type":"ContainerStarted","Data":"138346d19c190dcaadbfa8b15edab8c62b92ef336769457056e4f9df690f1b98"} Jan 22 05:28:59 crc kubenswrapper[4814]: I0122 05:28:59.881074 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-sxtpj" Jan 22 05:29:15 crc kubenswrapper[4814]: I0122 05:29:15.796004 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw"] Jan 22 05:29:15 crc kubenswrapper[4814]: I0122 05:29:15.798184 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" Jan 22 05:29:15 crc kubenswrapper[4814]: I0122 05:29:15.800346 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 22 05:29:15 crc kubenswrapper[4814]: I0122 05:29:15.807305 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw"] Jan 22 05:29:15 crc kubenswrapper[4814]: I0122 05:29:15.863256 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmj6b\" (UniqueName: \"kubernetes.io/projected/62f686ad-b838-4047-9bdc-428e9e655222-kube-api-access-dmj6b\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw\" (UID: \"62f686ad-b838-4047-9bdc-428e9e655222\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" Jan 22 05:29:15 crc kubenswrapper[4814]: I0122 05:29:15.863318 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/62f686ad-b838-4047-9bdc-428e9e655222-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw\" (UID: \"62f686ad-b838-4047-9bdc-428e9e655222\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" Jan 22 05:29:15 crc kubenswrapper[4814]: I0122 05:29:15.863338 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/62f686ad-b838-4047-9bdc-428e9e655222-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw\" (UID: \"62f686ad-b838-4047-9bdc-428e9e655222\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" Jan 22 05:29:15 crc kubenswrapper[4814]: I0122 05:29:15.964765 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/62f686ad-b838-4047-9bdc-428e9e655222-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw\" (UID: \"62f686ad-b838-4047-9bdc-428e9e655222\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" Jan 22 05:29:15 crc kubenswrapper[4814]: I0122 05:29:15.964817 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/62f686ad-b838-4047-9bdc-428e9e655222-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw\" (UID: \"62f686ad-b838-4047-9bdc-428e9e655222\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" Jan 22 05:29:15 crc kubenswrapper[4814]: I0122 05:29:15.964911 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmj6b\" (UniqueName: \"kubernetes.io/projected/62f686ad-b838-4047-9bdc-428e9e655222-kube-api-access-dmj6b\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw\" (UID: \"62f686ad-b838-4047-9bdc-428e9e655222\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" Jan 22 05:29:15 crc kubenswrapper[4814]: I0122 05:29:15.965314 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/62f686ad-b838-4047-9bdc-428e9e655222-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw\" (UID: \"62f686ad-b838-4047-9bdc-428e9e655222\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" Jan 22 05:29:15 crc kubenswrapper[4814]: I0122 05:29:15.965463 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/62f686ad-b838-4047-9bdc-428e9e655222-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw\" (UID: \"62f686ad-b838-4047-9bdc-428e9e655222\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" Jan 22 05:29:16 crc kubenswrapper[4814]: I0122 05:29:16.002566 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmj6b\" (UniqueName: \"kubernetes.io/projected/62f686ad-b838-4047-9bdc-428e9e655222-kube-api-access-dmj6b\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw\" (UID: \"62f686ad-b838-4047-9bdc-428e9e655222\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" Jan 22 05:29:16 crc kubenswrapper[4814]: I0122 05:29:16.116848 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" Jan 22 05:29:16 crc kubenswrapper[4814]: I0122 05:29:16.365020 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw"] Jan 22 05:29:16 crc kubenswrapper[4814]: I0122 05:29:16.739859 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" event={"ID":"62f686ad-b838-4047-9bdc-428e9e655222","Type":"ContainerStarted","Data":"f3005561546e9ab8942caa8fa84a35142089bd7989226257cdeca2cf7000dae1"} Jan 22 05:29:16 crc kubenswrapper[4814]: I0122 05:29:16.739911 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" event={"ID":"62f686ad-b838-4047-9bdc-428e9e655222","Type":"ContainerStarted","Data":"a9187ee424f74e405d51c75ae17225211a74cf74a0543ae42d49e53f3f9786c0"} Jan 22 05:29:17 crc kubenswrapper[4814]: I0122 05:29:17.747517 4814 generic.go:334] "Generic (PLEG): container finished" podID="62f686ad-b838-4047-9bdc-428e9e655222" containerID="f3005561546e9ab8942caa8fa84a35142089bd7989226257cdeca2cf7000dae1" exitCode=0 Jan 22 05:29:17 crc kubenswrapper[4814]: I0122 05:29:17.747810 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" event={"ID":"62f686ad-b838-4047-9bdc-428e9e655222","Type":"ContainerDied","Data":"f3005561546e9ab8942caa8fa84a35142089bd7989226257cdeca2cf7000dae1"} Jan 22 05:29:19 crc kubenswrapper[4814]: I0122 05:29:19.774996 4814 generic.go:334] "Generic (PLEG): container finished" podID="62f686ad-b838-4047-9bdc-428e9e655222" containerID="8a17bf54886f5babd39c8bee2df5dd55955425473c7ed7d326cf8ac145f85841" exitCode=0 Jan 22 05:29:19 crc kubenswrapper[4814]: I0122 05:29:19.775077 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" 
event={"ID":"62f686ad-b838-4047-9bdc-428e9e655222","Type":"ContainerDied","Data":"8a17bf54886f5babd39c8bee2df5dd55955425473c7ed7d326cf8ac145f85841"} Jan 22 05:29:20 crc kubenswrapper[4814]: I0122 05:29:20.788577 4814 generic.go:334] "Generic (PLEG): container finished" podID="62f686ad-b838-4047-9bdc-428e9e655222" containerID="da6e34604bcc59e5a2c99ef34b2a7c7a574810aee53848762f7f736c98e546c2" exitCode=0 Jan 22 05:29:20 crc kubenswrapper[4814]: I0122 05:29:20.788693 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" event={"ID":"62f686ad-b838-4047-9bdc-428e9e655222","Type":"ContainerDied","Data":"da6e34604bcc59e5a2c99ef34b2a7c7a574810aee53848762f7f736c98e546c2"} Jan 22 05:29:22 crc kubenswrapper[4814]: I0122 05:29:22.103442 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" Jan 22 05:29:22 crc kubenswrapper[4814]: I0122 05:29:22.159447 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/62f686ad-b838-4047-9bdc-428e9e655222-util\") pod \"62f686ad-b838-4047-9bdc-428e9e655222\" (UID: \"62f686ad-b838-4047-9bdc-428e9e655222\") " Jan 22 05:29:22 crc kubenswrapper[4814]: I0122 05:29:22.159531 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmj6b\" (UniqueName: \"kubernetes.io/projected/62f686ad-b838-4047-9bdc-428e9e655222-kube-api-access-dmj6b\") pod \"62f686ad-b838-4047-9bdc-428e9e655222\" (UID: \"62f686ad-b838-4047-9bdc-428e9e655222\") " Jan 22 05:29:22 crc kubenswrapper[4814]: I0122 05:29:22.159728 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/62f686ad-b838-4047-9bdc-428e9e655222-bundle\") pod \"62f686ad-b838-4047-9bdc-428e9e655222\" (UID: \"62f686ad-b838-4047-9bdc-428e9e655222\") " Jan 22 05:29:22 crc kubenswrapper[4814]: I0122 05:29:22.160659 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62f686ad-b838-4047-9bdc-428e9e655222-bundle" (OuterVolumeSpecName: "bundle") pod "62f686ad-b838-4047-9bdc-428e9e655222" (UID: "62f686ad-b838-4047-9bdc-428e9e655222"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:29:22 crc kubenswrapper[4814]: I0122 05:29:22.166140 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62f686ad-b838-4047-9bdc-428e9e655222-kube-api-access-dmj6b" (OuterVolumeSpecName: "kube-api-access-dmj6b") pod "62f686ad-b838-4047-9bdc-428e9e655222" (UID: "62f686ad-b838-4047-9bdc-428e9e655222"). InnerVolumeSpecName "kube-api-access-dmj6b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:29:22 crc kubenswrapper[4814]: I0122 05:29:22.183683 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62f686ad-b838-4047-9bdc-428e9e655222-util" (OuterVolumeSpecName: "util") pod "62f686ad-b838-4047-9bdc-428e9e655222" (UID: "62f686ad-b838-4047-9bdc-428e9e655222"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:29:22 crc kubenswrapper[4814]: I0122 05:29:22.261074 4814 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/62f686ad-b838-4047-9bdc-428e9e655222-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:29:22 crc kubenswrapper[4814]: I0122 05:29:22.261113 4814 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/62f686ad-b838-4047-9bdc-428e9e655222-util\") on node \"crc\" DevicePath \"\"" Jan 22 05:29:22 crc kubenswrapper[4814]: I0122 05:29:22.261125 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmj6b\" (UniqueName: \"kubernetes.io/projected/62f686ad-b838-4047-9bdc-428e9e655222-kube-api-access-dmj6b\") on node \"crc\" DevicePath \"\"" Jan 22 05:29:22 crc kubenswrapper[4814]: I0122 05:29:22.805823 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" Jan 22 05:29:22 crc kubenswrapper[4814]: I0122 05:29:22.805711 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7132dksw" event={"ID":"62f686ad-b838-4047-9bdc-428e9e655222","Type":"ContainerDied","Data":"a9187ee424f74e405d51c75ae17225211a74cf74a0543ae42d49e53f3f9786c0"} Jan 22 05:29:22 crc kubenswrapper[4814]: I0122 05:29:22.806726 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9187ee424f74e405d51c75ae17225211a74cf74a0543ae42d49e53f3f9786c0" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.401705 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-c99ct"] Jan 22 05:29:24 crc kubenswrapper[4814]: E0122 05:29:24.402137 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62f686ad-b838-4047-9bdc-428e9e655222" containerName="pull" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.402149 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="62f686ad-b838-4047-9bdc-428e9e655222" containerName="pull" Jan 22 05:29:24 crc kubenswrapper[4814]: E0122 05:29:24.402165 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62f686ad-b838-4047-9bdc-428e9e655222" containerName="extract" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.402170 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="62f686ad-b838-4047-9bdc-428e9e655222" containerName="extract" Jan 22 05:29:24 crc kubenswrapper[4814]: E0122 05:29:24.402184 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62f686ad-b838-4047-9bdc-428e9e655222" containerName="util" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.402190 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="62f686ad-b838-4047-9bdc-428e9e655222" containerName="util" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.402289 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="62f686ad-b838-4047-9bdc-428e9e655222" containerName="extract" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.402694 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-c99ct" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.405141 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.405156 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.405268 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-ntbln" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.410350 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-c99ct"] Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.488318 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvlg6\" (UniqueName: \"kubernetes.io/projected/bc44f7d8-1138-4f95-bb93-e18b303e585f-kube-api-access-fvlg6\") pod \"nmstate-operator-646758c888-c99ct\" (UID: \"bc44f7d8-1138-4f95-bb93-e18b303e585f\") " pod="openshift-nmstate/nmstate-operator-646758c888-c99ct" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.588852 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvlg6\" (UniqueName: \"kubernetes.io/projected/bc44f7d8-1138-4f95-bb93-e18b303e585f-kube-api-access-fvlg6\") pod \"nmstate-operator-646758c888-c99ct\" (UID: \"bc44f7d8-1138-4f95-bb93-e18b303e585f\") " pod="openshift-nmstate/nmstate-operator-646758c888-c99ct" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.604550 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvlg6\" (UniqueName: \"kubernetes.io/projected/bc44f7d8-1138-4f95-bb93-e18b303e585f-kube-api-access-fvlg6\") pod \"nmstate-operator-646758c888-c99ct\" (UID: \"bc44f7d8-1138-4f95-bb93-e18b303e585f\") " pod="openshift-nmstate/nmstate-operator-646758c888-c99ct" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.716884 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-c99ct" Jan 22 05:29:24 crc kubenswrapper[4814]: I0122 05:29:24.946167 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-c99ct"] Jan 22 05:29:25 crc kubenswrapper[4814]: I0122 05:29:25.823780 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-c99ct" event={"ID":"bc44f7d8-1138-4f95-bb93-e18b303e585f","Type":"ContainerStarted","Data":"af24e7e7deea88ecf26ceddcd88a2f0d1074865c7ff1acaea96e6f5d7bd7c91e"} Jan 22 05:29:27 crc kubenswrapper[4814]: I0122 05:29:27.836419 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-c99ct" event={"ID":"bc44f7d8-1138-4f95-bb93-e18b303e585f","Type":"ContainerStarted","Data":"ead506d8043067d297ebc541cc03cb1c909f3d543e09e71eff3f3fa28da32f15"} Jan 22 05:29:27 crc kubenswrapper[4814]: I0122 05:29:27.866432 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-c99ct" podStartSLOduration=1.540670785 podStartE2EDuration="3.866409269s" podCreationTimestamp="2026-01-22 05:29:24 +0000 UTC" firstStartedPulling="2026-01-22 05:29:24.965195575 +0000 UTC m=+651.048683790" lastFinishedPulling="2026-01-22 05:29:27.290934059 +0000 UTC m=+653.374422274" observedRunningTime="2026-01-22 05:29:27.861207228 +0000 UTC m=+653.944695483" watchObservedRunningTime="2026-01-22 05:29:27.866409269 +0000 UTC m=+653.949897514" Jan 22 05:29:28 crc kubenswrapper[4814]: I0122 05:29:28.902303 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-hwd6l"] Jan 22 05:29:28 crc kubenswrapper[4814]: I0122 05:29:28.904673 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-hwd6l" Jan 22 05:29:28 crc kubenswrapper[4814]: I0122 05:29:28.907598 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-55pz2" Jan 22 05:29:28 crc kubenswrapper[4814]: I0122 05:29:28.917355 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6"] Jan 22 05:29:28 crc kubenswrapper[4814]: I0122 05:29:28.918163 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" Jan 22 05:29:28 crc kubenswrapper[4814]: I0122 05:29:28.921613 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 22 05:29:28 crc kubenswrapper[4814]: I0122 05:29:28.934434 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6"] Jan 22 05:29:28 crc kubenswrapper[4814]: I0122 05:29:28.938323 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-hwd6l"] Jan 22 05:29:28 crc kubenswrapper[4814]: I0122 05:29:28.954411 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npfsx\" (UniqueName: \"kubernetes.io/projected/3b3c2d90-f81b-471e-8edd-6fded8370193-kube-api-access-npfsx\") pod \"nmstate-metrics-54757c584b-hwd6l\" (UID: \"3b3c2d90-f81b-471e-8edd-6fded8370193\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-hwd6l" Jan 22 05:29:28 crc kubenswrapper[4814]: I0122 05:29:28.954664 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jklfq\" (UniqueName: \"kubernetes.io/projected/00ec777b-581e-459d-878c-1d69c32bb763-kube-api-access-jklfq\") pod \"nmstate-webhook-8474b5b9d8-j96g6\" (UID: \"00ec777b-581e-459d-878c-1d69c32bb763\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" Jan 22 05:29:28 crc kubenswrapper[4814]: I0122 05:29:28.954766 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/00ec777b-581e-459d-878c-1d69c32bb763-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-j96g6\" (UID: \"00ec777b-581e-459d-878c-1d69c32bb763\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" Jan 22 05:29:28 crc kubenswrapper[4814]: I0122 05:29:28.959801 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-s4kgf"] Jan 22 05:29:28 crc kubenswrapper[4814]: I0122 05:29:28.960447 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.056034 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/5cd3580b-2987-4bd5-8838-f8db17d778a0-ovs-socket\") pod \"nmstate-handler-s4kgf\" (UID: \"5cd3580b-2987-4bd5-8838-f8db17d778a0\") " pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.056101 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/00ec777b-581e-459d-878c-1d69c32bb763-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-j96g6\" (UID: \"00ec777b-581e-459d-878c-1d69c32bb763\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.056156 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/5cd3580b-2987-4bd5-8838-f8db17d778a0-dbus-socket\") pod \"nmstate-handler-s4kgf\" (UID: \"5cd3580b-2987-4bd5-8838-f8db17d778a0\") " pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.056181 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/5cd3580b-2987-4bd5-8838-f8db17d778a0-nmstate-lock\") pod \"nmstate-handler-s4kgf\" (UID: \"5cd3580b-2987-4bd5-8838-f8db17d778a0\") " pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.056208 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k49l5\" (UniqueName: \"kubernetes.io/projected/5cd3580b-2987-4bd5-8838-f8db17d778a0-kube-api-access-k49l5\") pod \"nmstate-handler-s4kgf\" (UID: \"5cd3580b-2987-4bd5-8838-f8db17d778a0\") " pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.056242 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npfsx\" (UniqueName: \"kubernetes.io/projected/3b3c2d90-f81b-471e-8edd-6fded8370193-kube-api-access-npfsx\") pod \"nmstate-metrics-54757c584b-hwd6l\" (UID: \"3b3c2d90-f81b-471e-8edd-6fded8370193\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-hwd6l" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.056283 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jklfq\" (UniqueName: \"kubernetes.io/projected/00ec777b-581e-459d-878c-1d69c32bb763-kube-api-access-jklfq\") pod \"nmstate-webhook-8474b5b9d8-j96g6\" (UID: \"00ec777b-581e-459d-878c-1d69c32bb763\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" Jan 22 05:29:29 crc kubenswrapper[4814]: E0122 05:29:29.056485 4814 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Jan 22 05:29:29 crc kubenswrapper[4814]: E0122 05:29:29.056649 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/00ec777b-581e-459d-878c-1d69c32bb763-tls-key-pair podName:00ec777b-581e-459d-878c-1d69c32bb763 nodeName:}" failed. No retries permitted until 2026-01-22 05:29:29.556615256 +0000 UTC m=+655.640103471 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/00ec777b-581e-459d-878c-1d69c32bb763-tls-key-pair") pod "nmstate-webhook-8474b5b9d8-j96g6" (UID: "00ec777b-581e-459d-878c-1d69c32bb763") : secret "openshift-nmstate-webhook" not found Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.091230 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm"] Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.091962 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.099872 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-829mg" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.100067 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.100440 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npfsx\" (UniqueName: \"kubernetes.io/projected/3b3c2d90-f81b-471e-8edd-6fded8370193-kube-api-access-npfsx\") pod \"nmstate-metrics-54757c584b-hwd6l\" (UID: \"3b3c2d90-f81b-471e-8edd-6fded8370193\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-hwd6l" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.101697 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm"] Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.105362 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jklfq\" (UniqueName: \"kubernetes.io/projected/00ec777b-581e-459d-878c-1d69c32bb763-kube-api-access-jklfq\") pod \"nmstate-webhook-8474b5b9d8-j96g6\" (UID: \"00ec777b-581e-459d-878c-1d69c32bb763\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.118871 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.157728 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/41a509f5-0527-440e-a9ad-c737503d1383-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-852sm\" (UID: \"41a509f5-0527-440e-a9ad-c737503d1383\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.157767 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/5cd3580b-2987-4bd5-8838-f8db17d778a0-dbus-socket\") pod \"nmstate-handler-s4kgf\" (UID: \"5cd3580b-2987-4bd5-8838-f8db17d778a0\") " pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.157790 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/5cd3580b-2987-4bd5-8838-f8db17d778a0-nmstate-lock\") pod \"nmstate-handler-s4kgf\" (UID: \"5cd3580b-2987-4bd5-8838-f8db17d778a0\") " pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.157808 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k49l5\" (UniqueName: 
\"kubernetes.io/projected/5cd3580b-2987-4bd5-8838-f8db17d778a0-kube-api-access-k49l5\") pod \"nmstate-handler-s4kgf\" (UID: \"5cd3580b-2987-4bd5-8838-f8db17d778a0\") " pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.157831 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkkkc\" (UniqueName: \"kubernetes.io/projected/41a509f5-0527-440e-a9ad-c737503d1383-kube-api-access-nkkkc\") pod \"nmstate-console-plugin-7754f76f8b-852sm\" (UID: \"41a509f5-0527-440e-a9ad-c737503d1383\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.157857 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/41a509f5-0527-440e-a9ad-c737503d1383-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-852sm\" (UID: \"41a509f5-0527-440e-a9ad-c737503d1383\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.157886 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/5cd3580b-2987-4bd5-8838-f8db17d778a0-ovs-socket\") pod \"nmstate-handler-s4kgf\" (UID: \"5cd3580b-2987-4bd5-8838-f8db17d778a0\") " pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.157940 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/5cd3580b-2987-4bd5-8838-f8db17d778a0-ovs-socket\") pod \"nmstate-handler-s4kgf\" (UID: \"5cd3580b-2987-4bd5-8838-f8db17d778a0\") " pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.158171 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/5cd3580b-2987-4bd5-8838-f8db17d778a0-dbus-socket\") pod \"nmstate-handler-s4kgf\" (UID: \"5cd3580b-2987-4bd5-8838-f8db17d778a0\") " pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.158201 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/5cd3580b-2987-4bd5-8838-f8db17d778a0-nmstate-lock\") pod \"nmstate-handler-s4kgf\" (UID: \"5cd3580b-2987-4bd5-8838-f8db17d778a0\") " pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.179145 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k49l5\" (UniqueName: \"kubernetes.io/projected/5cd3580b-2987-4bd5-8838-f8db17d778a0-kube-api-access-k49l5\") pod \"nmstate-handler-s4kgf\" (UID: \"5cd3580b-2987-4bd5-8838-f8db17d778a0\") " pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.230557 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-hwd6l" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.259077 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/41a509f5-0527-440e-a9ad-c737503d1383-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-852sm\" (UID: \"41a509f5-0527-440e-a9ad-c737503d1383\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.259147 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkkkc\" (UniqueName: \"kubernetes.io/projected/41a509f5-0527-440e-a9ad-c737503d1383-kube-api-access-nkkkc\") pod \"nmstate-console-plugin-7754f76f8b-852sm\" (UID: \"41a509f5-0527-440e-a9ad-c737503d1383\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.259200 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/41a509f5-0527-440e-a9ad-c737503d1383-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-852sm\" (UID: \"41a509f5-0527-440e-a9ad-c737503d1383\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" Jan 22 05:29:29 crc kubenswrapper[4814]: E0122 05:29:29.259347 4814 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Jan 22 05:29:29 crc kubenswrapper[4814]: E0122 05:29:29.259398 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/41a509f5-0527-440e-a9ad-c737503d1383-plugin-serving-cert podName:41a509f5-0527-440e-a9ad-c737503d1383 nodeName:}" failed. No retries permitted until 2026-01-22 05:29:29.759383139 +0000 UTC m=+655.842871354 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/41a509f5-0527-440e-a9ad-c737503d1383-plugin-serving-cert") pod "nmstate-console-plugin-7754f76f8b-852sm" (UID: "41a509f5-0527-440e-a9ad-c737503d1383") : secret "plugin-serving-cert" not found Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.260257 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/41a509f5-0527-440e-a9ad-c737503d1383-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-852sm\" (UID: \"41a509f5-0527-440e-a9ad-c737503d1383\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.283511 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.284501 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkkkc\" (UniqueName: \"kubernetes.io/projected/41a509f5-0527-440e-a9ad-c737503d1383-kube-api-access-nkkkc\") pod \"nmstate-console-plugin-7754f76f8b-852sm\" (UID: \"41a509f5-0527-440e-a9ad-c737503d1383\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.392026 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-846b68848b-ptb9r"] Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.393414 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.408954 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-846b68848b-ptb9r"] Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.460975 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-service-ca\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.461029 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-console-serving-cert\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.461049 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-console-oauth-config\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.461110 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-console-config\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.461126 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-oauth-serving-cert\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.461140 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-trusted-ca-bundle\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.461156 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8bc5\" (UniqueName: \"kubernetes.io/projected/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-kube-api-access-s8bc5\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.561862 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/00ec777b-581e-459d-878c-1d69c32bb763-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-j96g6\" (UID: \"00ec777b-581e-459d-878c-1d69c32bb763\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" Jan 22 05:29:29 crc 
kubenswrapper[4814]: I0122 05:29:29.561904 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-oauth-serving-cert\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.561919 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-console-config\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.561935 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-trusted-ca-bundle\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.561950 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8bc5\" (UniqueName: \"kubernetes.io/projected/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-kube-api-access-s8bc5\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.561979 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-service-ca\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.562008 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-console-serving-cert\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.562024 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-console-oauth-config\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.564183 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-trusted-ca-bundle\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.565163 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-service-ca\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 
05:29:29.565867 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-oauth-serving-cert\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.567022 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-console-config\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.567531 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/00ec777b-581e-459d-878c-1d69c32bb763-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-j96g6\" (UID: \"00ec777b-581e-459d-878c-1d69c32bb763\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.568185 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-console-oauth-config\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.569065 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-console-serving-cert\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.588816 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8bc5\" (UniqueName: \"kubernetes.io/projected/0249f4b3-a915-4ee2-a3c1-1306480ec3e8-kube-api-access-s8bc5\") pod \"console-846b68848b-ptb9r\" (UID: \"0249f4b3-a915-4ee2-a3c1-1306480ec3e8\") " pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.709024 4814 util.go:30] "No sandbox for pod can be found. 
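The VerifyControllerAttachedVolume, "MountVolume started", and "MountVolume.SetUp succeeded" entries above are the kubelet volume manager's reconciler walking every volume of console-846b68848b-ptb9r from attached to mounted. A minimal sketch, assuming only the message shapes visible above (this is not kubelet code, and the regexp is hand-written for these lines), that pulls the volume name and pod UID out of the SetUp-succeeded entries on stdin:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    // Matches messages of the form seen above:
    //   MountVolume.SetUp succeeded for volume \"service-ca\" ... (UID: \"0249f4b3-...\")
    var mountRe = regexp.MustCompile(`MountVolume\.SetUp succeeded for volume \\"([^"\\]+)\\".*\(UID: \\"([0-9a-f-]+)\\"\)`)

    func main() {
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 1024*1024), 1024*1024) // kubelet lines can be very long
        for sc.Scan() {
            if m := mountRe.FindStringSubmatch(sc.Text()); m != nil {
                fmt.Printf("pod %s: volume %q mounted\n", m[2], m[1])
            }
        }
    }

Fed this log, it prints one line per mounted volume, e.g. pod 0249f4b3-a915-4ee2-a3c1-1306480ec3e8: volume "trusted-ca-bundle" mounted.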
Need to start a new one" pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.709275 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-hwd6l"] Jan 22 05:29:29 crc kubenswrapper[4814]: W0122 05:29:29.731275 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b3c2d90_f81b_471e_8edd_6fded8370193.slice/crio-ad587c1de80096eaf9fd5c9190f971b549efb27a88a0e839f2a991853eca86e2 WatchSource:0}: Error finding container ad587c1de80096eaf9fd5c9190f971b549efb27a88a0e839f2a991853eca86e2: Status 404 returned error can't find the container with id ad587c1de80096eaf9fd5c9190f971b549efb27a88a0e839f2a991853eca86e2 Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.764830 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/41a509f5-0527-440e-a9ad-c737503d1383-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-852sm\" (UID: \"41a509f5-0527-440e-a9ad-c737503d1383\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.767955 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/41a509f5-0527-440e-a9ad-c737503d1383-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-852sm\" (UID: \"41a509f5-0527-440e-a9ad-c737503d1383\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.835355 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.860040 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-hwd6l" event={"ID":"3b3c2d90-f81b-471e-8edd-6fded8370193","Type":"ContainerStarted","Data":"ad587c1de80096eaf9fd5c9190f971b549efb27a88a0e839f2a991853eca86e2"} Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.863244 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-s4kgf" event={"ID":"5cd3580b-2987-4bd5-8838-f8db17d778a0","Type":"ContainerStarted","Data":"9dd968af1021b4d8ba6ed9b27da24bf7f1df99848e473fdf4205c5a0aaf94856"} Jan 22 05:29:29 crc kubenswrapper[4814]: I0122 05:29:29.931309 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-846b68848b-ptb9r"] Jan 22 05:29:29 crc kubenswrapper[4814]: W0122 05:29:29.945132 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0249f4b3_a915_4ee2_a3c1_1306480ec3e8.slice/crio-8af5bf99ad932e3da4aa609d93ce779a020117bcd3c29de7752cad9fc342bed2 WatchSource:0}: Error finding container 8af5bf99ad932e3da4aa609d93ce779a020117bcd3c29de7752cad9fc342bed2: Status 404 returned error can't find the container with id 8af5bf99ad932e3da4aa609d93ce779a020117bcd3c29de7752cad9fc342bed2 Jan 22 05:29:30 crc kubenswrapper[4814]: I0122 05:29:30.037472 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" Jan 22 05:29:30 crc kubenswrapper[4814]: I0122 05:29:30.246862 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm"] Jan 22 05:29:30 crc kubenswrapper[4814]: W0122 05:29:30.250116 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41a509f5_0527_440e_a9ad_c737503d1383.slice/crio-139953a8e44566e4b1e907873c2f5ff3659c9c045dd31a3d3533d07ce6a23916 WatchSource:0}: Error finding container 139953a8e44566e4b1e907873c2f5ff3659c9c045dd31a3d3533d07ce6a23916: Status 404 returned error can't find the container with id 139953a8e44566e4b1e907873c2f5ff3659c9c045dd31a3d3533d07ce6a23916 Jan 22 05:29:30 crc kubenswrapper[4814]: I0122 05:29:30.264121 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6"] Jan 22 05:29:30 crc kubenswrapper[4814]: I0122 05:29:30.879622 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" event={"ID":"00ec777b-581e-459d-878c-1d69c32bb763","Type":"ContainerStarted","Data":"12391fa0a5e92c29081774650f61425ae2bad58d4baf7c373cdee5190f6f8011"} Jan 22 05:29:30 crc kubenswrapper[4814]: I0122 05:29:30.882446 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-846b68848b-ptb9r" event={"ID":"0249f4b3-a915-4ee2-a3c1-1306480ec3e8","Type":"ContainerStarted","Data":"8846f2f5caebbc44ef58fcae7f99a7fa0fb055558f340a5f88f9eaaf48c47cae"} Jan 22 05:29:30 crc kubenswrapper[4814]: I0122 05:29:30.882541 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-846b68848b-ptb9r" event={"ID":"0249f4b3-a915-4ee2-a3c1-1306480ec3e8","Type":"ContainerStarted","Data":"8af5bf99ad932e3da4aa609d93ce779a020117bcd3c29de7752cad9fc342bed2"} Jan 22 05:29:30 crc kubenswrapper[4814]: I0122 05:29:30.885948 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" event={"ID":"41a509f5-0527-440e-a9ad-c737503d1383","Type":"ContainerStarted","Data":"139953a8e44566e4b1e907873c2f5ff3659c9c045dd31a3d3533d07ce6a23916"} Jan 22 05:29:32 crc kubenswrapper[4814]: I0122 05:29:32.901773 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" event={"ID":"41a509f5-0527-440e-a9ad-c737503d1383","Type":"ContainerStarted","Data":"80ab19c5cffe5c642247316a0faacae094f6b77e9e2f3c53cee108b352dc61e4"} Jan 22 05:29:32 crc kubenswrapper[4814]: I0122 05:29:32.905190 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-s4kgf" event={"ID":"5cd3580b-2987-4bd5-8838-f8db17d778a0","Type":"ContainerStarted","Data":"c3ee078e76eeb60492605e0567b1ddd5576238b70f0c1e2cebb9765e9271b078"} Jan 22 05:29:32 crc kubenswrapper[4814]: I0122 05:29:32.905470 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:32 crc kubenswrapper[4814]: I0122 05:29:32.907666 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" event={"ID":"00ec777b-581e-459d-878c-1d69c32bb763","Type":"ContainerStarted","Data":"21975116004dab7d05001c1b478d1e3ff261a9003361532698a9e432337c3966"} Jan 22 05:29:32 crc kubenswrapper[4814]: I0122 05:29:32.907975 4814 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" Jan 22 05:29:32 crc kubenswrapper[4814]: I0122 05:29:32.909907 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-hwd6l" event={"ID":"3b3c2d90-f81b-471e-8edd-6fded8370193","Type":"ContainerStarted","Data":"2ebd30d20d52c1398a021df2b907ac64661464898b807fcb0068577a153d6378"} Jan 22 05:29:32 crc kubenswrapper[4814]: I0122 05:29:32.924520 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-852sm" podStartSLOduration=1.684165375 podStartE2EDuration="3.92449599s" podCreationTimestamp="2026-01-22 05:29:29 +0000 UTC" firstStartedPulling="2026-01-22 05:29:30.25184441 +0000 UTC m=+656.335332625" lastFinishedPulling="2026-01-22 05:29:32.492175025 +0000 UTC m=+658.575663240" observedRunningTime="2026-01-22 05:29:32.924332435 +0000 UTC m=+659.007820690" watchObservedRunningTime="2026-01-22 05:29:32.92449599 +0000 UTC m=+659.007984225" Jan 22 05:29:32 crc kubenswrapper[4814]: I0122 05:29:32.924891 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-846b68848b-ptb9r" podStartSLOduration=3.924882971 podStartE2EDuration="3.924882971s" podCreationTimestamp="2026-01-22 05:29:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:29:30.908398083 +0000 UTC m=+656.991886308" watchObservedRunningTime="2026-01-22 05:29:32.924882971 +0000 UTC m=+659.008371206" Jan 22 05:29:32 crc kubenswrapper[4814]: I0122 05:29:32.988689 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" podStartSLOduration=2.771907193 podStartE2EDuration="4.988671189s" podCreationTimestamp="2026-01-22 05:29:28 +0000 UTC" firstStartedPulling="2026-01-22 05:29:30.270825477 +0000 UTC m=+656.354313692" lastFinishedPulling="2026-01-22 05:29:32.487589463 +0000 UTC m=+658.571077688" observedRunningTime="2026-01-22 05:29:32.965089528 +0000 UTC m=+659.048577783" watchObservedRunningTime="2026-01-22 05:29:32.988671189 +0000 UTC m=+659.072159414" Jan 22 05:29:32 crc kubenswrapper[4814]: I0122 05:29:32.989772 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-s4kgf" podStartSLOduration=1.7905002049999998 podStartE2EDuration="4.989766452s" podCreationTimestamp="2026-01-22 05:29:28 +0000 UTC" firstStartedPulling="2026-01-22 05:29:29.311101931 +0000 UTC m=+655.394590136" lastFinishedPulling="2026-01-22 05:29:32.510368128 +0000 UTC m=+658.593856383" observedRunningTime="2026-01-22 05:29:32.985651875 +0000 UTC m=+659.069140100" watchObservedRunningTime="2026-01-22 05:29:32.989766452 +0000 UTC m=+659.073254677" Jan 22 05:29:35 crc kubenswrapper[4814]: I0122 05:29:35.932071 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-hwd6l" event={"ID":"3b3c2d90-f81b-471e-8edd-6fded8370193","Type":"ContainerStarted","Data":"c822b763e60f751b327004b0b5808f78a422a884e7355f17318a1c54f59f3a12"} Jan 22 05:29:35 crc kubenswrapper[4814]: I0122 05:29:35.960542 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-hwd6l" podStartSLOduration=2.7525300230000003 podStartE2EDuration="7.960495698s" podCreationTimestamp="2026-01-22 05:29:28 +0000 UTC" 
firstStartedPulling="2026-01-22 05:29:29.734339645 +0000 UTC m=+655.817827870" lastFinishedPulling="2026-01-22 05:29:34.9423053 +0000 UTC m=+661.025793545" observedRunningTime="2026-01-22 05:29:35.958710923 +0000 UTC m=+662.042199178" watchObservedRunningTime="2026-01-22 05:29:35.960495698 +0000 UTC m=+662.043983943" Jan 22 05:29:39 crc kubenswrapper[4814]: I0122 05:29:39.309484 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-s4kgf" Jan 22 05:29:39 crc kubenswrapper[4814]: I0122 05:29:39.709910 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:39 crc kubenswrapper[4814]: I0122 05:29:39.709986 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:39 crc kubenswrapper[4814]: I0122 05:29:39.718052 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:39 crc kubenswrapper[4814]: I0122 05:29:39.968247 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-846b68848b-ptb9r" Jan 22 05:29:40 crc kubenswrapper[4814]: I0122 05:29:40.056543 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-jnnrg"] Jan 22 05:29:49 crc kubenswrapper[4814]: I0122 05:29:49.845331 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-j96g6" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.204068 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb"] Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.205118 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.208459 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb"] Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.208823 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.209026 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.309046 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfvkl\" (UniqueName: \"kubernetes.io/projected/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-kube-api-access-dfvkl\") pod \"collect-profiles-29484330-jq5wb\" (UID: \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.309102 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-config-volume\") pod \"collect-profiles-29484330-jq5wb\" (UID: \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.309149 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-secret-volume\") pod \"collect-profiles-29484330-jq5wb\" (UID: \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.410614 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfvkl\" (UniqueName: \"kubernetes.io/projected/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-kube-api-access-dfvkl\") pod \"collect-profiles-29484330-jq5wb\" (UID: \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.410688 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-config-volume\") pod \"collect-profiles-29484330-jq5wb\" (UID: \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.410734 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-secret-volume\") pod \"collect-profiles-29484330-jq5wb\" (UID: \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.411899 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-config-volume\") pod 
\"collect-profiles-29484330-jq5wb\" (UID: \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.417087 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-secret-volume\") pod \"collect-profiles-29484330-jq5wb\" (UID: \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.429201 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfvkl\" (UniqueName: \"kubernetes.io/projected/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-kube-api-access-dfvkl\") pod \"collect-profiles-29484330-jq5wb\" (UID: \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.525416 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" Jan 22 05:30:00 crc kubenswrapper[4814]: I0122 05:30:00.741179 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb"] Jan 22 05:30:01 crc kubenswrapper[4814]: I0122 05:30:01.107185 4814 generic.go:334] "Generic (PLEG): container finished" podID="e12e9b03-1d35-47a8-a8e8-47899f80a4f2" containerID="6b0382515290bcd633eae672ef4f6d930f86e16eaebd5e5f2cfcd55bcf5a40b3" exitCode=0 Jan 22 05:30:01 crc kubenswrapper[4814]: I0122 05:30:01.107303 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" event={"ID":"e12e9b03-1d35-47a8-a8e8-47899f80a4f2","Type":"ContainerDied","Data":"6b0382515290bcd633eae672ef4f6d930f86e16eaebd5e5f2cfcd55bcf5a40b3"} Jan 22 05:30:01 crc kubenswrapper[4814]: I0122 05:30:01.107697 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" event={"ID":"e12e9b03-1d35-47a8-a8e8-47899f80a4f2","Type":"ContainerStarted","Data":"fcabc5967b433096595d7da75dc0aae0c5d5aaabf34ee893a0aaa58962e66edd"} Jan 22 05:30:02 crc kubenswrapper[4814]: I0122 05:30:02.343078 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" Jan 22 05:30:02 crc kubenswrapper[4814]: I0122 05:30:02.454952 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dfvkl\" (UniqueName: \"kubernetes.io/projected/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-kube-api-access-dfvkl\") pod \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\" (UID: \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\") " Jan 22 05:30:02 crc kubenswrapper[4814]: I0122 05:30:02.455035 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-config-volume\") pod \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\" (UID: \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\") " Jan 22 05:30:02 crc kubenswrapper[4814]: I0122 05:30:02.455058 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-secret-volume\") pod \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\" (UID: \"e12e9b03-1d35-47a8-a8e8-47899f80a4f2\") " Jan 22 05:30:02 crc kubenswrapper[4814]: I0122 05:30:02.456609 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-config-volume" (OuterVolumeSpecName: "config-volume") pod "e12e9b03-1d35-47a8-a8e8-47899f80a4f2" (UID: "e12e9b03-1d35-47a8-a8e8-47899f80a4f2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:30:02 crc kubenswrapper[4814]: I0122 05:30:02.460737 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e12e9b03-1d35-47a8-a8e8-47899f80a4f2" (UID: "e12e9b03-1d35-47a8-a8e8-47899f80a4f2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:30:02 crc kubenswrapper[4814]: I0122 05:30:02.461268 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-kube-api-access-dfvkl" (OuterVolumeSpecName: "kube-api-access-dfvkl") pod "e12e9b03-1d35-47a8-a8e8-47899f80a4f2" (UID: "e12e9b03-1d35-47a8-a8e8-47899f80a4f2"). InnerVolumeSpecName "kube-api-access-dfvkl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:30:02 crc kubenswrapper[4814]: I0122 05:30:02.559136 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dfvkl\" (UniqueName: \"kubernetes.io/projected/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-kube-api-access-dfvkl\") on node \"crc\" DevicePath \"\"" Jan 22 05:30:02 crc kubenswrapper[4814]: I0122 05:30:02.559169 4814 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 05:30:02 crc kubenswrapper[4814]: I0122 05:30:02.559178 4814 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e12e9b03-1d35-47a8-a8e8-47899f80a4f2-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 05:30:03 crc kubenswrapper[4814]: I0122 05:30:03.128085 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" event={"ID":"e12e9b03-1d35-47a8-a8e8-47899f80a4f2","Type":"ContainerDied","Data":"fcabc5967b433096595d7da75dc0aae0c5d5aaabf34ee893a0aaa58962e66edd"} Jan 22 05:30:03 crc kubenswrapper[4814]: I0122 05:30:03.128127 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fcabc5967b433096595d7da75dc0aae0c5d5aaabf34ee893a0aaa58962e66edd" Jan 22 05:30:03 crc kubenswrapper[4814]: I0122 05:30:03.129461 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.182873 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx"] Jan 22 05:30:04 crc kubenswrapper[4814]: E0122 05:30:04.183514 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e12e9b03-1d35-47a8-a8e8-47899f80a4f2" containerName="collect-profiles" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.183534 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="e12e9b03-1d35-47a8-a8e8-47899f80a4f2" containerName="collect-profiles" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.183733 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="e12e9b03-1d35-47a8-a8e8-47899f80a4f2" containerName="collect-profiles" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.185018 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.192534 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.202761 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx"] Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.280407 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwfp8\" (UniqueName: \"kubernetes.io/projected/d55e55c7-0d00-426a-bd2c-9a0cd024865a-kube-api-access-bwfp8\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx\" (UID: \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.280826 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d55e55c7-0d00-426a-bd2c-9a0cd024865a-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx\" (UID: \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.280958 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d55e55c7-0d00-426a-bd2c-9a0cd024865a-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx\" (UID: \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.382790 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d55e55c7-0d00-426a-bd2c-9a0cd024865a-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx\" (UID: \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.382867 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d55e55c7-0d00-426a-bd2c-9a0cd024865a-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx\" (UID: \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.382970 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwfp8\" (UniqueName: \"kubernetes.io/projected/d55e55c7-0d00-426a-bd2c-9a0cd024865a-kube-api-access-bwfp8\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx\" (UID: \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.384075 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/d55e55c7-0d00-426a-bd2c-9a0cd024865a-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx\" (UID: \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.384105 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d55e55c7-0d00-426a-bd2c-9a0cd024865a-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx\" (UID: \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.414771 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwfp8\" (UniqueName: \"kubernetes.io/projected/d55e55c7-0d00-426a-bd2c-9a0cd024865a-kube-api-access-bwfp8\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx\" (UID: \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.517595 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" Jan 22 05:30:04 crc kubenswrapper[4814]: I0122 05:30:04.808381 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx"] Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.105142 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-jnnrg" podUID="b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" containerName="console" containerID="cri-o://5627d88f02d4e512e41c42f76c73ed608548939439663c7a662347e4fed6cd76" gracePeriod=15 Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.144430 4814 generic.go:334] "Generic (PLEG): container finished" podID="d55e55c7-0d00-426a-bd2c-9a0cd024865a" containerID="0511a4441dbdabf138bd261d26c56e512cb265b2526325ff7fbe16d3a8a6b283" exitCode=0 Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.144507 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" event={"ID":"d55e55c7-0d00-426a-bd2c-9a0cd024865a","Type":"ContainerDied","Data":"0511a4441dbdabf138bd261d26c56e512cb265b2526325ff7fbe16d3a8a6b283"} Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.144546 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" event={"ID":"d55e55c7-0d00-426a-bd2c-9a0cd024865a","Type":"ContainerStarted","Data":"7ffabebcc527725d80ccee412c0246cfa561c9cb01f9fee4a2bdfed3ca69fd90"} Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.525895 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-jnnrg_b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8/console/0.log" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.526032 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.600649 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-serving-cert\") pod \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.600716 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-trusted-ca-bundle\") pod \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.600759 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-np8fb\" (UniqueName: \"kubernetes.io/projected/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-kube-api-access-np8fb\") pod \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.600776 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-oauth-serving-cert\") pod \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.600942 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-config\") pod \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.600992 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-oauth-config\") pod \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.601017 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-service-ca\") pod \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\" (UID: \"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8\") " Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.601927 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-service-ca" (OuterVolumeSpecName: "service-ca") pod "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" (UID: "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.601998 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" (UID: "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.602372 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-config" (OuterVolumeSpecName: "console-config") pod "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" (UID: "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.602793 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" (UID: "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.607071 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-kube-api-access-np8fb" (OuterVolumeSpecName: "kube-api-access-np8fb") pod "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" (UID: "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8"). InnerVolumeSpecName "kube-api-access-np8fb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.607341 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" (UID: "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.608891 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" (UID: "b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.702346 4814 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.702380 4814 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-service-ca\") on node \"crc\" DevicePath \"\"" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.702392 4814 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.702404 4814 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.702416 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-np8fb\" (UniqueName: \"kubernetes.io/projected/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-kube-api-access-np8fb\") on node \"crc\" DevicePath \"\"" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.702429 4814 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 22 05:30:05 crc kubenswrapper[4814]: I0122 05:30:05.702439 4814 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8-console-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:30:06 crc kubenswrapper[4814]: I0122 05:30:06.154568 4814 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-jnnrg_b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8/console/0.log" Jan 22 05:30:06 crc kubenswrapper[4814]: I0122 05:30:06.155002 4814 generic.go:334] "Generic (PLEG): container finished" podID="b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" containerID="5627d88f02d4e512e41c42f76c73ed608548939439663c7a662347e4fed6cd76" exitCode=2 Jan 22 05:30:06 crc kubenswrapper[4814]: I0122 05:30:06.155047 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jnnrg" event={"ID":"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8","Type":"ContainerDied","Data":"5627d88f02d4e512e41c42f76c73ed608548939439663c7a662347e4fed6cd76"} Jan 22 05:30:06 crc kubenswrapper[4814]: I0122 05:30:06.155084 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jnnrg" event={"ID":"b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8","Type":"ContainerDied","Data":"0fed9d6afb2138ba7d69e904890bd125fab460d26e8ebcc580e0cf23934bea50"} Jan 22 05:30:06 crc kubenswrapper[4814]: I0122 05:30:06.155111 4814 scope.go:117] "RemoveContainer" containerID="5627d88f02d4e512e41c42f76c73ed608548939439663c7a662347e4fed6cd76" Jan 22 05:30:06 crc kubenswrapper[4814]: I0122 05:30:06.155307 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-jnnrg" Jan 22 05:30:06 crc kubenswrapper[4814]: I0122 05:30:06.185903 4814 scope.go:117] "RemoveContainer" containerID="5627d88f02d4e512e41c42f76c73ed608548939439663c7a662347e4fed6cd76" Jan 22 05:30:06 crc kubenswrapper[4814]: E0122 05:30:06.186518 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5627d88f02d4e512e41c42f76c73ed608548939439663c7a662347e4fed6cd76\": container with ID starting with 5627d88f02d4e512e41c42f76c73ed608548939439663c7a662347e4fed6cd76 not found: ID does not exist" containerID="5627d88f02d4e512e41c42f76c73ed608548939439663c7a662347e4fed6cd76" Jan 22 05:30:06 crc kubenswrapper[4814]: I0122 05:30:06.186560 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5627d88f02d4e512e41c42f76c73ed608548939439663c7a662347e4fed6cd76"} err="failed to get container status \"5627d88f02d4e512e41c42f76c73ed608548939439663c7a662347e4fed6cd76\": rpc error: code = NotFound desc = could not find container \"5627d88f02d4e512e41c42f76c73ed608548939439663c7a662347e4fed6cd76\": container with ID starting with 5627d88f02d4e512e41c42f76c73ed608548939439663c7a662347e4fed6cd76 not found: ID does not exist" Jan 22 05:30:06 crc kubenswrapper[4814]: I0122 05:30:06.204867 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-jnnrg"] Jan 22 05:30:06 crc kubenswrapper[4814]: I0122 05:30:06.212829 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-jnnrg"] Jan 22 05:30:06 crc kubenswrapper[4814]: I0122 05:30:06.367448 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" path="/var/lib/kubelet/pods/b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8/volumes" Jan 22 05:30:07 crc kubenswrapper[4814]: I0122 05:30:07.169185 4814 generic.go:334] "Generic (PLEG): container finished" podID="d55e55c7-0d00-426a-bd2c-9a0cd024865a" containerID="d86d5ff2920d4a3413e46b58600b896558de40ac69834a9a382414b44c6fb8d7" exitCode=0 Jan 22 05:30:07 crc kubenswrapper[4814]: I0122 05:30:07.169245 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" event={"ID":"d55e55c7-0d00-426a-bd2c-9a0cd024865a","Type":"ContainerDied","Data":"d86d5ff2920d4a3413e46b58600b896558de40ac69834a9a382414b44c6fb8d7"} Jan 22 05:30:08 crc kubenswrapper[4814]: I0122 05:30:08.180245 4814 generic.go:334] "Generic (PLEG): container finished" podID="d55e55c7-0d00-426a-bd2c-9a0cd024865a" containerID="ef835d4a0aa86c17fa2e7cea3c9ac16228d7f3156b5aa8dfa4bb2c0ebb243867" exitCode=0 Jan 22 05:30:08 crc kubenswrapper[4814]: I0122 05:30:08.180315 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" event={"ID":"d55e55c7-0d00-426a-bd2c-9a0cd024865a","Type":"ContainerDied","Data":"ef835d4a0aa86c17fa2e7cea3c9ac16228d7f3156b5aa8dfa4bb2c0ebb243867"} Jan 22 05:30:09 crc kubenswrapper[4814]: I0122 05:30:09.526471 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" Jan 22 05:30:09 crc kubenswrapper[4814]: I0122 05:30:09.658961 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwfp8\" (UniqueName: \"kubernetes.io/projected/d55e55c7-0d00-426a-bd2c-9a0cd024865a-kube-api-access-bwfp8\") pod \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\" (UID: \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\") " Jan 22 05:30:09 crc kubenswrapper[4814]: I0122 05:30:09.659105 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d55e55c7-0d00-426a-bd2c-9a0cd024865a-util\") pod \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\" (UID: \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\") " Jan 22 05:30:09 crc kubenswrapper[4814]: I0122 05:30:09.659217 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d55e55c7-0d00-426a-bd2c-9a0cd024865a-bundle\") pod \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\" (UID: \"d55e55c7-0d00-426a-bd2c-9a0cd024865a\") " Jan 22 05:30:09 crc kubenswrapper[4814]: I0122 05:30:09.660671 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d55e55c7-0d00-426a-bd2c-9a0cd024865a-bundle" (OuterVolumeSpecName: "bundle") pod "d55e55c7-0d00-426a-bd2c-9a0cd024865a" (UID: "d55e55c7-0d00-426a-bd2c-9a0cd024865a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:30:09 crc kubenswrapper[4814]: I0122 05:30:09.666062 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d55e55c7-0d00-426a-bd2c-9a0cd024865a-kube-api-access-bwfp8" (OuterVolumeSpecName: "kube-api-access-bwfp8") pod "d55e55c7-0d00-426a-bd2c-9a0cd024865a" (UID: "d55e55c7-0d00-426a-bd2c-9a0cd024865a"). InnerVolumeSpecName "kube-api-access-bwfp8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:30:09 crc kubenswrapper[4814]: I0122 05:30:09.680186 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d55e55c7-0d00-426a-bd2c-9a0cd024865a-util" (OuterVolumeSpecName: "util") pod "d55e55c7-0d00-426a-bd2c-9a0cd024865a" (UID: "d55e55c7-0d00-426a-bd2c-9a0cd024865a"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:30:09 crc kubenswrapper[4814]: I0122 05:30:09.761200 4814 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d55e55c7-0d00-426a-bd2c-9a0cd024865a-util\") on node \"crc\" DevicePath \"\"" Jan 22 05:30:09 crc kubenswrapper[4814]: I0122 05:30:09.761246 4814 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d55e55c7-0d00-426a-bd2c-9a0cd024865a-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:30:09 crc kubenswrapper[4814]: I0122 05:30:09.761267 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwfp8\" (UniqueName: \"kubernetes.io/projected/d55e55c7-0d00-426a-bd2c-9a0cd024865a-kube-api-access-bwfp8\") on node \"crc\" DevicePath \"\"" Jan 22 05:30:10 crc kubenswrapper[4814]: I0122 05:30:10.200436 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" event={"ID":"d55e55c7-0d00-426a-bd2c-9a0cd024865a","Type":"ContainerDied","Data":"7ffabebcc527725d80ccee412c0246cfa561c9cb01f9fee4a2bdfed3ca69fd90"} Jan 22 05:30:10 crc kubenswrapper[4814]: I0122 05:30:10.200512 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ffabebcc527725d80ccee412c0246cfa561c9cb01f9fee4a2bdfed3ca69fd90" Jan 22 05:30:10 crc kubenswrapper[4814]: I0122 05:30:10.200543 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhsdx" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.189227 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-76b666586d-cblhh"] Jan 22 05:30:19 crc kubenswrapper[4814]: E0122 05:30:19.189640 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" containerName="console" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.189666 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" containerName="console" Jan 22 05:30:19 crc kubenswrapper[4814]: E0122 05:30:19.189679 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d55e55c7-0d00-426a-bd2c-9a0cd024865a" containerName="util" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.189685 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d55e55c7-0d00-426a-bd2c-9a0cd024865a" containerName="util" Jan 22 05:30:19 crc kubenswrapper[4814]: E0122 05:30:19.189696 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d55e55c7-0d00-426a-bd2c-9a0cd024865a" containerName="pull" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.189703 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d55e55c7-0d00-426a-bd2c-9a0cd024865a" containerName="pull" Jan 22 05:30:19 crc kubenswrapper[4814]: E0122 05:30:19.189713 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d55e55c7-0d00-426a-bd2c-9a0cd024865a" containerName="extract" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.189719 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d55e55c7-0d00-426a-bd2c-9a0cd024865a" containerName="extract" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.189824 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="b390cf49-98fc-4d9c-82f0-48a8c6aa9ec8" containerName="console" Jan 
22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.189834 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="d55e55c7-0d00-426a-bd2c-9a0cd024865a" containerName="extract" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.190156 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.191967 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.192261 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.196181 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.197759 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-m6bt8" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.199176 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.216802 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-76b666586d-cblhh"] Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.287228 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lb5zj\" (UniqueName: \"kubernetes.io/projected/4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0-kube-api-access-lb5zj\") pod \"metallb-operator-controller-manager-76b666586d-cblhh\" (UID: \"4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0\") " pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.287267 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0-webhook-cert\") pod \"metallb-operator-controller-manager-76b666586d-cblhh\" (UID: \"4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0\") " pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.287287 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0-apiservice-cert\") pod \"metallb-operator-controller-manager-76b666586d-cblhh\" (UID: \"4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0\") " pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.389000 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lb5zj\" (UniqueName: \"kubernetes.io/projected/4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0-kube-api-access-lb5zj\") pod \"metallb-operator-controller-manager-76b666586d-cblhh\" (UID: \"4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0\") " pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.389062 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0-webhook-cert\") pod \"metallb-operator-controller-manager-76b666586d-cblhh\" (UID: \"4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0\") " pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.389092 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0-apiservice-cert\") pod \"metallb-operator-controller-manager-76b666586d-cblhh\" (UID: \"4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0\") " pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.394494 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0-apiservice-cert\") pod \"metallb-operator-controller-manager-76b666586d-cblhh\" (UID: \"4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0\") " pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.395171 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0-webhook-cert\") pod \"metallb-operator-controller-manager-76b666586d-cblhh\" (UID: \"4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0\") " pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.404322 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lb5zj\" (UniqueName: \"kubernetes.io/projected/4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0-kube-api-access-lb5zj\") pod \"metallb-operator-controller-manager-76b666586d-cblhh\" (UID: \"4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0\") " pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.508576 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.521369 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"] Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.522280 4814 util.go:30] "No sandbox for pod can be found. 
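The reconciler_common.go and operation_generator.go entries above trace the kubelet volume manager's fixed per-volume progression: VerifyControllerAttachedVolume started, then MountVolume started, then MountVolume.SetUp succeeded (or failed, in which case the volume is re-queued for a later pass). A minimal Go sketch of that desired-state-to-actual-state loop follows; the types and setUp stand-in are hypothetical illustrations of the phases logged here, not kubelet source:

package main

import "fmt"

type volume struct{ uniqueName, pod string }

// setUp stands in for the per-plugin mount step; it always succeeds here.
func setUp(v volume) error { return nil }

// reconcile drives every volume the pod spec wants toward "mounted",
// emitting the same three phases seen in the log lines above.
func reconcile(desired []volume) {
	for _, v := range desired {
		fmt.Printf("VerifyControllerAttachedVolume started for volume %q pod %q\n", v.uniqueName, v.pod)
		fmt.Printf("MountVolume started for volume %q pod %q\n", v.uniqueName, v.pod)
		if err := setUp(v); err != nil {
			fmt.Printf("MountVolume.SetUp failed for volume %q: %v\n", v.uniqueName, err)
			continue // re-queued with backoff on a later reconcile pass
		}
		fmt.Printf("MountVolume.SetUp succeeded for volume %q pod %q\n", v.uniqueName, v.pod)
	}
}

func main() {
	// Illustrative UniqueNames modeled on the entries above (elided UIDs).
	reconcile([]volume{
		{"kubernetes.io/projected/...-kube-api-access-lb5zj", "metallb-operator-controller-manager-76b666586d-cblhh"},
		{"kubernetes.io/secret/...-webhook-cert", "metallb-operator-controller-manager-76b666586d-cblhh"},
	})
}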
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.527844 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-zkbv2"
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.527885 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.528060 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.542280 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"]
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.591395 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bmr4\" (UniqueName: \"kubernetes.io/projected/d2e0270d-6522-41e4-82c3-6ca820f98bc4-kube-api-access-5bmr4\") pod \"metallb-operator-webhook-server-554d5b45fc-jkt2p\" (UID: \"d2e0270d-6522-41e4-82c3-6ca820f98bc4\") " pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.591459 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d2e0270d-6522-41e4-82c3-6ca820f98bc4-apiservice-cert\") pod \"metallb-operator-webhook-server-554d5b45fc-jkt2p\" (UID: \"d2e0270d-6522-41e4-82c3-6ca820f98bc4\") " pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.591494 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d2e0270d-6522-41e4-82c3-6ca820f98bc4-webhook-cert\") pod \"metallb-operator-webhook-server-554d5b45fc-jkt2p\" (UID: \"d2e0270d-6522-41e4-82c3-6ca820f98bc4\") " pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.613742 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.614103 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.692227 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bmr4\" (UniqueName: \"kubernetes.io/projected/d2e0270d-6522-41e4-82c3-6ca820f98bc4-kube-api-access-5bmr4\") pod \"metallb-operator-webhook-server-554d5b45fc-jkt2p\" (UID: \"d2e0270d-6522-41e4-82c3-6ca820f98bc4\") " pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.692297 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d2e0270d-6522-41e4-82c3-6ca820f98bc4-apiservice-cert\") pod \"metallb-operator-webhook-server-554d5b45fc-jkt2p\" (UID: \"d2e0270d-6522-41e4-82c3-6ca820f98bc4\") " pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.692335 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d2e0270d-6522-41e4-82c3-6ca820f98bc4-webhook-cert\") pod \"metallb-operator-webhook-server-554d5b45fc-jkt2p\" (UID: \"d2e0270d-6522-41e4-82c3-6ca820f98bc4\") " pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.701213 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d2e0270d-6522-41e4-82c3-6ca820f98bc4-apiservice-cert\") pod \"metallb-operator-webhook-server-554d5b45fc-jkt2p\" (UID: \"d2e0270d-6522-41e4-82c3-6ca820f98bc4\") " pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.708857 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d2e0270d-6522-41e4-82c3-6ca820f98bc4-webhook-cert\") pod \"metallb-operator-webhook-server-554d5b45fc-jkt2p\" (UID: \"d2e0270d-6522-41e4-82c3-6ca820f98bc4\") " pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.714556 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bmr4\" (UniqueName: \"kubernetes.io/projected/d2e0270d-6522-41e4-82c3-6ca820f98bc4-kube-api-access-5bmr4\") pod \"metallb-operator-webhook-server-554d5b45fc-jkt2p\" (UID: \"d2e0270d-6522-41e4-82c3-6ca820f98bc4\") " pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.882832 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-76b666586d-cblhh"]
Jan 22 05:30:19 crc kubenswrapper[4814]: W0122 05:30:19.889278 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4f52f543_bd4a_4d8c_8cf4_b6d6c77a7af0.slice/crio-9513a8dd71bc68f9bb3eda23fd96db27b0d213e982aaef694eac517dbd1b7ce4 WatchSource:0}: Error finding container 9513a8dd71bc68f9bb3eda23fd96db27b0d213e982aaef694eac517dbd1b7ce4: Status 404 returned error can't find the container with id 9513a8dd71bc68f9bb3eda23fd96db27b0d213e982aaef694eac517dbd1b7ce4
Jan 22 05:30:19 crc kubenswrapper[4814]: I0122 05:30:19.928553 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"
Jan 22 05:30:20 crc kubenswrapper[4814]: I0122 05:30:20.218390 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"]
Jan 22 05:30:20 crc kubenswrapper[4814]: I0122 05:30:20.267824 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" event={"ID":"4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0","Type":"ContainerStarted","Data":"9513a8dd71bc68f9bb3eda23fd96db27b0d213e982aaef694eac517dbd1b7ce4"}
Jan 22 05:30:20 crc kubenswrapper[4814]: I0122 05:30:20.269155 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p" event={"ID":"d2e0270d-6522-41e4-82c3-6ca820f98bc4","Type":"ContainerStarted","Data":"0b979d79d33415d2d83aac27e6964e77c007faf3ca555b425c5aeb6dd0a2a580"}
Jan 22 05:30:23 crc kubenswrapper[4814]: I0122 05:30:23.290014 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" event={"ID":"4f52f543-bd4a-4d8c-8cf4-b6d6c77a7af0","Type":"ContainerStarted","Data":"73b1939f166106ba23e6cf1a8648c5a4a730c11c8d0d0ec32a0284e53acde298"}
Jan 22 05:30:23 crc kubenswrapper[4814]: I0122 05:30:23.290272 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh"
Jan 22 05:30:23 crc kubenswrapper[4814]: I0122 05:30:23.313600 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh" podStartSLOduration=1.174390629 podStartE2EDuration="4.313581735s" podCreationTimestamp="2026-01-22 05:30:19 +0000 UTC" firstStartedPulling="2026-01-22 05:30:19.896092436 +0000 UTC m=+705.979580651" lastFinishedPulling="2026-01-22 05:30:23.035283542 +0000 UTC m=+709.118771757" observedRunningTime="2026-01-22 05:30:23.312873203 +0000 UTC m=+709.396361418" watchObservedRunningTime="2026-01-22 05:30:23.313581735 +0000 UTC m=+709.397069950"
Jan 22 05:30:25 crc kubenswrapper[4814]: I0122 05:30:25.301803 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p" event={"ID":"d2e0270d-6522-41e4-82c3-6ca820f98bc4","Type":"ContainerStarted","Data":"fd1d60910e135e18fd89b978c1db38c780dac3532dfa4635ad19afb7c5d57ee1"}
Jan 22 05:30:25 crc kubenswrapper[4814]: I0122 05:30:25.302066 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"
Jan 22 05:30:25 crc kubenswrapper[4814]: I0122 05:30:25.320923 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p" podStartSLOduration=1.810893231 podStartE2EDuration="6.32090282s" podCreationTimestamp="2026-01-22 05:30:19 +0000 UTC" firstStartedPulling="2026-01-22 05:30:20.242316464 +0000 UTC m=+706.325804679" lastFinishedPulling="2026-01-22 05:30:24.752326053 +0000 UTC m=+710.835814268" observedRunningTime="2026-01-22 05:30:25.316760101 +0000 UTC m=+711.400248336" watchObservedRunningTime="2026-01-22 05:30:25.32090282 +0000 UTC m=+711.404391045"
Jan 22 05:30:39 crc kubenswrapper[4814]: I0122 05:30:39.937874 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-554d5b45fc-jkt2p"
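The "Observed pod startup duration" entries above each report two figures. From the logged timestamps, podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration appears to be that same span minus the image-pull window (lastFinishedPulling minus firstStartedPulling): for the controller-manager line, 4.313581735s - (05:30:23.035283542 - 05:30:19.896092436) = 1.174390629s, which matches. A short Go check of that arithmetic, as an inference from the logged values rather than a statement about the tracker's internals:

package main

import (
	"fmt"
	"time"
)

// mustParse parses timestamps in the form the kubelet logs them.
func mustParse(s string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2026-01-22 05:30:19 +0000 UTC")
	firstPull := mustParse("2026-01-22 05:30:19.896092436 +0000 UTC")
	lastPull := mustParse("2026-01-22 05:30:23.035283542 +0000 UTC")
	running := mustParse("2026-01-22 05:30:23.313581735 +0000 UTC")

	e2e := running.Sub(created)          // 4.313581735s == podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // 1.174390629s == podStartSLOduration
	fmt.Println(e2e, slo)
}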
Jan 22 05:30:49 crc kubenswrapper[4814]: I0122 05:30:49.613833 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 05:30:49 crc kubenswrapper[4814]: I0122 05:30:49.614439 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 05:30:59 crc kubenswrapper[4814]: I0122 05:30:59.511251 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-76b666586d-cblhh"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.324275 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m"]
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.324911 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.331094 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.331445 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-5xxm9"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.341167 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-5cxn8"]
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.343388 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.345675 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.347254 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.350802 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m"]
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.430471 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-lphk6"]
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.431289 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-lphk6"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.433195 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ce290094-b837-4566-baf6-c829a8dff794-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-2q58m\" (UID: \"ce290094-b837-4566-baf6-c829a8dff794\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.433220 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7jbf\" (UniqueName: \"kubernetes.io/projected/913d532c-5441-4389-bfdd-ab39eb03619e-kube-api-access-v7jbf\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.433244 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/913d532c-5441-4389-bfdd-ab39eb03619e-frr-conf\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.433261 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/913d532c-5441-4389-bfdd-ab39eb03619e-metrics\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.433327 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/913d532c-5441-4389-bfdd-ab39eb03619e-reloader\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.433453 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/913d532c-5441-4389-bfdd-ab39eb03619e-frr-sockets\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.433566 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/913d532c-5441-4389-bfdd-ab39eb03619e-frr-startup\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.433688 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qwsl\" (UniqueName: \"kubernetes.io/projected/ce290094-b837-4566-baf6-c829a8dff794-kube-api-access-5qwsl\") pod \"frr-k8s-webhook-server-7df86c4f6c-2q58m\" (UID: \"ce290094-b837-4566-baf6-c829a8dff794\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.433732 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/913d532c-5441-4389-bfdd-ab39eb03619e-metrics-certs\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.434432 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-57bl9"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.434642 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.435547 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.435738 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.464673 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-n5nbc"]
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.465656 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-n5nbc"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.467270 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.474732 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-n5nbc"]
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.535699 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpmpc\" (UniqueName: \"kubernetes.io/projected/befbc9d4-fc13-4730-af4d-8fe4e9186133-kube-api-access-qpmpc\") pod \"controller-6968d8fdc4-n5nbc\" (UID: \"befbc9d4-fc13-4730-af4d-8fe4e9186133\") " pod="metallb-system/controller-6968d8fdc4-n5nbc"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.535758 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qwsl\" (UniqueName: \"kubernetes.io/projected/ce290094-b837-4566-baf6-c829a8dff794-kube-api-access-5qwsl\") pod \"frr-k8s-webhook-server-7df86c4f6c-2q58m\" (UID: \"ce290094-b837-4566-baf6-c829a8dff794\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.535784 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-metrics-certs\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.535808 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/913d532c-5441-4389-bfdd-ab39eb03619e-metrics-certs\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.535837 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/befbc9d4-fc13-4730-af4d-8fe4e9186133-cert\") pod \"controller-6968d8fdc4-n5nbc\" (UID: \"befbc9d4-fc13-4730-af4d-8fe4e9186133\") " pod="metallb-system/controller-6968d8fdc4-n5nbc"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.535864 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7jbf\" (UniqueName: \"kubernetes.io/projected/913d532c-5441-4389-bfdd-ab39eb03619e-kube-api-access-v7jbf\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.535886 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ce290094-b837-4566-baf6-c829a8dff794-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-2q58m\" (UID: \"ce290094-b837-4566-baf6-c829a8dff794\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.535911 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/913d532c-5441-4389-bfdd-ab39eb03619e-frr-conf\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.535929 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/913d532c-5441-4389-bfdd-ab39eb03619e-metrics\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.535944 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/913d532c-5441-4389-bfdd-ab39eb03619e-reloader\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.535967 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/913d532c-5441-4389-bfdd-ab39eb03619e-frr-sockets\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.535994 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-memberlist\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.536021 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/befbc9d4-fc13-4730-af4d-8fe4e9186133-metrics-certs\") pod \"controller-6968d8fdc4-n5nbc\" (UID: \"befbc9d4-fc13-4730-af4d-8fe4e9186133\") " pod="metallb-system/controller-6968d8fdc4-n5nbc"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.536039 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcp4j\" (UniqueName: \"kubernetes.io/projected/cc8eab45-1359-4e15-8b68-db04f97ac2b2-kube-api-access-hcp4j\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.536063 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/cc8eab45-1359-4e15-8b68-db04f97ac2b2-metallb-excludel2\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.536096 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/913d532c-5441-4389-bfdd-ab39eb03619e-frr-startup\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.536937 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/913d532c-5441-4389-bfdd-ab39eb03619e-frr-startup\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: E0122 05:31:00.537311 4814 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found
Jan 22 05:31:00 crc kubenswrapper[4814]: E0122 05:31:00.537352 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/913d532c-5441-4389-bfdd-ab39eb03619e-metrics-certs podName:913d532c-5441-4389-bfdd-ab39eb03619e nodeName:}" failed. No retries permitted until 2026-01-22 05:31:01.037339901 +0000 UTC m=+747.120828116 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/913d532c-5441-4389-bfdd-ab39eb03619e-metrics-certs") pod "frr-k8s-5cxn8" (UID: "913d532c-5441-4389-bfdd-ab39eb03619e") : secret "frr-k8s-certs-secret" not found
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.545864 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/913d532c-5441-4389-bfdd-ab39eb03619e-metrics\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.546020 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/913d532c-5441-4389-bfdd-ab39eb03619e-reloader\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.548146 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/913d532c-5441-4389-bfdd-ab39eb03619e-frr-sockets\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.548170 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/913d532c-5441-4389-bfdd-ab39eb03619e-frr-conf\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.562352 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ce290094-b837-4566-baf6-c829a8dff794-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-2q58m\" (UID: \"ce290094-b837-4566-baf6-c829a8dff794\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.562463 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qwsl\" (UniqueName: \"kubernetes.io/projected/ce290094-b837-4566-baf6-c829a8dff794-kube-api-access-5qwsl\") pod \"frr-k8s-webhook-server-7df86c4f6c-2q58m\" (UID: \"ce290094-b837-4566-baf6-c829a8dff794\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.565843 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7jbf\" (UniqueName: \"kubernetes.io/projected/913d532c-5441-4389-bfdd-ab39eb03619e-kube-api-access-v7jbf\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.637491 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-memberlist\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.637535 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/befbc9d4-fc13-4730-af4d-8fe4e9186133-metrics-certs\") pod \"controller-6968d8fdc4-n5nbc\" (UID: \"befbc9d4-fc13-4730-af4d-8fe4e9186133\") " pod="metallb-system/controller-6968d8fdc4-n5nbc"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.637559 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcp4j\" (UniqueName: \"kubernetes.io/projected/cc8eab45-1359-4e15-8b68-db04f97ac2b2-kube-api-access-hcp4j\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.637577 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/cc8eab45-1359-4e15-8b68-db04f97ac2b2-metallb-excludel2\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.637612 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpmpc\" (UniqueName: \"kubernetes.io/projected/befbc9d4-fc13-4730-af4d-8fe4e9186133-kube-api-access-qpmpc\") pod \"controller-6968d8fdc4-n5nbc\" (UID: \"befbc9d4-fc13-4730-af4d-8fe4e9186133\") " pod="metallb-system/controller-6968d8fdc4-n5nbc"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.637677 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-metrics-certs\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:00 crc kubenswrapper[4814]: E0122 05:31:00.637685 4814 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.637720 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/befbc9d4-fc13-4730-af4d-8fe4e9186133-cert\") pod \"controller-6968d8fdc4-n5nbc\" (UID: \"befbc9d4-fc13-4730-af4d-8fe4e9186133\") " pod="metallb-system/controller-6968d8fdc4-n5nbc"
Jan 22 05:31:00 crc kubenswrapper[4814]: E0122 05:31:00.637761 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-memberlist podName:cc8eab45-1359-4e15-8b68-db04f97ac2b2 nodeName:}" failed. No retries permitted until 2026-01-22 05:31:01.137742959 +0000 UTC m=+747.221231174 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-memberlist") pod "speaker-lphk6" (UID: "cc8eab45-1359-4e15-8b68-db04f97ac2b2") : secret "metallb-memberlist" not found
Jan 22 05:31:00 crc kubenswrapper[4814]: E0122 05:31:00.638268 4814 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found
Jan 22 05:31:00 crc kubenswrapper[4814]: E0122 05:31:00.638325 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-metrics-certs podName:cc8eab45-1359-4e15-8b68-db04f97ac2b2 nodeName:}" failed. No retries permitted until 2026-01-22 05:31:01.138311786 +0000 UTC m=+747.221799991 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-metrics-certs") pod "speaker-lphk6" (UID: "cc8eab45-1359-4e15-8b68-db04f97ac2b2") : secret "speaker-certs-secret" not found
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.638769 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/cc8eab45-1359-4e15-8b68-db04f97ac2b2-metallb-excludel2\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.639786 4814 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.642141 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.642516 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/befbc9d4-fc13-4730-af4d-8fe4e9186133-metrics-certs\") pod \"controller-6968d8fdc4-n5nbc\" (UID: \"befbc9d4-fc13-4730-af4d-8fe4e9186133\") " pod="metallb-system/controller-6968d8fdc4-n5nbc"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.650674 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/befbc9d4-fc13-4730-af4d-8fe4e9186133-cert\") pod \"controller-6968d8fdc4-n5nbc\" (UID: \"befbc9d4-fc13-4730-af4d-8fe4e9186133\") " pod="metallb-system/controller-6968d8fdc4-n5nbc"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.653735 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcp4j\" (UniqueName: \"kubernetes.io/projected/cc8eab45-1359-4e15-8b68-db04f97ac2b2-kube-api-access-hcp4j\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.656924 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpmpc\" (UniqueName: \"kubernetes.io/projected/befbc9d4-fc13-4730-af4d-8fe4e9186133-kube-api-access-qpmpc\") pod \"controller-6968d8fdc4-n5nbc\" (UID: \"befbc9d4-fc13-4730-af4d-8fe4e9186133\") " pod="metallb-system/controller-6968d8fdc4-n5nbc"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.780742 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-n5nbc"
Jan 22 05:31:00 crc kubenswrapper[4814]: I0122 05:31:00.857857 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m"]
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.052175 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/913d532c-5441-4389-bfdd-ab39eb03619e-metrics-certs\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.056934 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/913d532c-5441-4389-bfdd-ab39eb03619e-metrics-certs\") pod \"frr-k8s-5cxn8\" (UID: \"913d532c-5441-4389-bfdd-ab39eb03619e\") " pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.153505 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-memberlist\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.153680 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-metrics-certs\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:01 crc kubenswrapper[4814]: E0122 05:31:01.153788 4814 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Jan 22 05:31:01 crc kubenswrapper[4814]: E0122 05:31:01.153894 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-memberlist podName:cc8eab45-1359-4e15-8b68-db04f97ac2b2 nodeName:}" failed. No retries permitted until 2026-01-22 05:31:02.153867532 +0000 UTC m=+748.237355777 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-memberlist") pod "speaker-lphk6" (UID: "cc8eab45-1359-4e15-8b68-db04f97ac2b2") : secret "metallb-memberlist" not found
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.160559 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-metrics-certs\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.212386 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-n5nbc"]
Jan 22 05:31:01 crc kubenswrapper[4814]: W0122 05:31:01.214809 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbefbc9d4_fc13_4730_af4d_8fe4e9186133.slice/crio-f5286d0f2af3ffae00cb4986d2ddde688f6405fe563dba81d91b0940d1fdc929 WatchSource:0}: Error finding container f5286d0f2af3ffae00cb4986d2ddde688f6405fe563dba81d91b0940d1fdc929: Status 404 returned error can't find the container with id f5286d0f2af3ffae00cb4986d2ddde688f6405fe563dba81d91b0940d1fdc929
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.255182 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-5cxn8"
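Worth noting in the failures above: the same missing "metallb-memberlist" secret is re-queued first with durationBeforeRetry 500ms (at 05:31:00.637761) and then 1s (at 05:31:01.153894), i.e. the mount retry delay doubles per consecutive failure until the secret appears and SetUp succeeds. A Go sketch of that policy follows; the 500ms base and 2x factor are read off these log lines, the cap is an assumed placeholder, and none of this is kubelet source:

package main

import (
	"fmt"
	"time"
)

const (
	baseDelay = 500 * time.Millisecond // first durationBeforeRetry seen above
	maxDelay  = 2 * time.Minute        // assumed cap, for illustration only
)

// durationBeforeRetry doubles with each consecutive failure: 500ms, 1s, 2s, ...
func durationBeforeRetry(failures int) time.Duration {
	d := baseDelay << uint(failures)
	if d <= 0 || d > maxDelay { // guard overflow and clamp to the cap
		return maxDelay
	}
	return d
}

func main() {
	for f := 0; f < 4; f++ {
		fmt.Printf("failure %d -> retry in %v\n", f+1, durationBeforeRetry(f))
	}
}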
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.542035 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-n5nbc" event={"ID":"befbc9d4-fc13-4730-af4d-8fe4e9186133","Type":"ContainerStarted","Data":"53a674cc66f26cb277c11a2d9b0c5804f1c210842d19b0d5a8122c9e1f2b4159"}
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.542392 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-n5nbc" event={"ID":"befbc9d4-fc13-4730-af4d-8fe4e9186133","Type":"ContainerStarted","Data":"945266351f258e6213a2c31e6af894ac6ba3f366430d79cd0a45ee5fdaa2c164"}
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.542405 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-n5nbc" event={"ID":"befbc9d4-fc13-4730-af4d-8fe4e9186133","Type":"ContainerStarted","Data":"f5286d0f2af3ffae00cb4986d2ddde688f6405fe563dba81d91b0940d1fdc929"}
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.543478 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-n5nbc"
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.544090 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m" event={"ID":"ce290094-b837-4566-baf6-c829a8dff794","Type":"ContainerStarted","Data":"27503859b6369e39ef1eb86a881945854b14d4661ad30fc123e4aadbda524087"}
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.544751 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5cxn8" event={"ID":"913d532c-5441-4389-bfdd-ab39eb03619e","Type":"ContainerStarted","Data":"ffbd293472298951860f11078418a7c590a573746bd774994d1408db8ab603d6"}
Jan 22 05:31:01 crc kubenswrapper[4814]: I0122 05:31:01.561956 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-n5nbc" podStartSLOduration=1.5619372 podStartE2EDuration="1.5619372s" podCreationTimestamp="2026-01-22 05:31:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:31:01.561012882 +0000 UTC m=+747.644501097" watchObservedRunningTime="2026-01-22 05:31:01.5619372 +0000 UTC m=+747.645425415"
Jan 22 05:31:02 crc kubenswrapper[4814]: I0122 05:31:02.168789 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-memberlist\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:02 crc kubenswrapper[4814]: I0122 05:31:02.178811 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/cc8eab45-1359-4e15-8b68-db04f97ac2b2-memberlist\") pod \"speaker-lphk6\" (UID: \"cc8eab45-1359-4e15-8b68-db04f97ac2b2\") " pod="metallb-system/speaker-lphk6"
Jan 22 05:31:02 crc kubenswrapper[4814]: I0122 05:31:02.244553 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-lphk6"
Jan 22 05:31:02 crc kubenswrapper[4814]: W0122 05:31:02.262973 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcc8eab45_1359_4e15_8b68_db04f97ac2b2.slice/crio-98d4d8c87037bc629d05dd099afdc101e4642e02868b1d898075a16805a553c1 WatchSource:0}: Error finding container 98d4d8c87037bc629d05dd099afdc101e4642e02868b1d898075a16805a553c1: Status 404 returned error can't find the container with id 98d4d8c87037bc629d05dd099afdc101e4642e02868b1d898075a16805a553c1
Jan 22 05:31:02 crc kubenswrapper[4814]: I0122 05:31:02.554138 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-lphk6" event={"ID":"cc8eab45-1359-4e15-8b68-db04f97ac2b2","Type":"ContainerStarted","Data":"de09b566c5f0a2928854a5ffb703ee46bf756447d78054559505feffd0856ffc"}
Jan 22 05:31:02 crc kubenswrapper[4814]: I0122 05:31:02.554182 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-lphk6" event={"ID":"cc8eab45-1359-4e15-8b68-db04f97ac2b2","Type":"ContainerStarted","Data":"98d4d8c87037bc629d05dd099afdc101e4642e02868b1d898075a16805a553c1"}
Jan 22 05:31:03 crc kubenswrapper[4814]: I0122 05:31:03.562570 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-lphk6" event={"ID":"cc8eab45-1359-4e15-8b68-db04f97ac2b2","Type":"ContainerStarted","Data":"4d4f683065ba654641baabc83fa22562ad505096946bde6657c041fbe4ab3fe4"}
Jan 22 05:31:04 crc kubenswrapper[4814]: I0122 05:31:04.363616 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-lphk6" podStartSLOduration=4.363598091 podStartE2EDuration="4.363598091s" podCreationTimestamp="2026-01-22 05:31:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:31:03.595133653 +0000 UTC m=+749.678621858" watchObservedRunningTime="2026-01-22 05:31:04.363598091 +0000 UTC m=+750.447086306"
Jan 22 05:31:04 crc kubenswrapper[4814]: I0122 05:31:04.567801 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-lphk6"
Jan 22 05:31:08 crc kubenswrapper[4814]: I0122 05:31:08.191380 4814 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 22 05:31:10 crc kubenswrapper[4814]: I0122 05:31:10.601094 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m" event={"ID":"ce290094-b837-4566-baf6-c829a8dff794","Type":"ContainerStarted","Data":"9e62ba61c3093d0436e7c295f15200797dc8538d44497b0314640b05b045a99b"}
Jan 22 05:31:10 crc kubenswrapper[4814]: I0122 05:31:10.601650 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m"
Jan 22 05:31:10 crc kubenswrapper[4814]: I0122 05:31:10.603579 4814 generic.go:334] "Generic (PLEG): container finished" podID="913d532c-5441-4389-bfdd-ab39eb03619e" containerID="97168cf869ea54f3a97b63e33391bc443fdeb651d7767cd445b4249050f33634" exitCode=0
Jan 22 05:31:10 crc kubenswrapper[4814]: I0122 05:31:10.603615 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5cxn8" event={"ID":"913d532c-5441-4389-bfdd-ab39eb03619e","Type":"ContainerDied","Data":"97168cf869ea54f3a97b63e33391bc443fdeb651d7767cd445b4249050f33634"}
Jan 22 05:31:10 crc kubenswrapper[4814]: I0122 05:31:10.633729 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m" podStartSLOduration=1.99642086 podStartE2EDuration="10.633704091s" podCreationTimestamp="2026-01-22 05:31:00 +0000 UTC" firstStartedPulling="2026-01-22 05:31:00.881885877 +0000 UTC m=+746.965374093" lastFinishedPulling="2026-01-22 05:31:09.519169109 +0000 UTC m=+755.602657324" observedRunningTime="2026-01-22 05:31:10.62240299 +0000 UTC m=+756.705891235" watchObservedRunningTime="2026-01-22 05:31:10.633704091 +0000 UTC m=+756.717192346"
Jan 22 05:31:11 crc kubenswrapper[4814]: I0122 05:31:11.610388 4814 generic.go:334] "Generic (PLEG): container finished" podID="913d532c-5441-4389-bfdd-ab39eb03619e" containerID="1b691738ae40b59ab02de9d063d146603f8438636f2a71770e0b976acffbd985" exitCode=0
Jan 22 05:31:11 crc kubenswrapper[4814]: I0122 05:31:11.610471 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5cxn8" event={"ID":"913d532c-5441-4389-bfdd-ab39eb03619e","Type":"ContainerDied","Data":"1b691738ae40b59ab02de9d063d146603f8438636f2a71770e0b976acffbd985"}
Jan 22 05:31:12 crc kubenswrapper[4814]: I0122 05:31:12.248841 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-lphk6"
Jan 22 05:31:12 crc kubenswrapper[4814]: I0122 05:31:12.621933 4814 generic.go:334] "Generic (PLEG): container finished" podID="913d532c-5441-4389-bfdd-ab39eb03619e" containerID="9457257d993e76c5d8907cefe5f97d9f0a35ecc6dd935c70f20fe5fc1c3159e6" exitCode=0
Jan 22 05:31:12 crc kubenswrapper[4814]: I0122 05:31:12.622010 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5cxn8" event={"ID":"913d532c-5441-4389-bfdd-ab39eb03619e","Type":"ContainerDied","Data":"9457257d993e76c5d8907cefe5f97d9f0a35ecc6dd935c70f20fe5fc1c3159e6"}
Jan 22 05:31:13 crc kubenswrapper[4814]: I0122 05:31:13.653916 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5cxn8" event={"ID":"913d532c-5441-4389-bfdd-ab39eb03619e","Type":"ContainerStarted","Data":"360a94e03e7b9d4ddf7c89d599a6fe804d43bffa9df800d46e5a611d701bb36f"}
Jan 22 05:31:13 crc kubenswrapper[4814]: I0122 05:31:13.654145 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5cxn8" event={"ID":"913d532c-5441-4389-bfdd-ab39eb03619e","Type":"ContainerStarted","Data":"38f18dc77129bd30d5001b1afb1f69d68bdc8f2f5a7f6ceb861b54b8074bf736"}
Jan 22 05:31:13 crc kubenswrapper[4814]: I0122 05:31:13.654154 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5cxn8" event={"ID":"913d532c-5441-4389-bfdd-ab39eb03619e","Type":"ContainerStarted","Data":"3e4847f1edba8904b20cb0cc1bc2f844cc68887971817dfa2a77194246078d50"}
Jan 22 05:31:13 crc kubenswrapper[4814]: I0122 05:31:13.654162 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5cxn8" event={"ID":"913d532c-5441-4389-bfdd-ab39eb03619e","Type":"ContainerStarted","Data":"f0d2a75ebc1539404700382d656f098ae9f5e1ea4679724bf92cdb01dff4154a"}
Jan 22 05:31:13 crc kubenswrapper[4814]: I0122 05:31:13.654169 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5cxn8" event={"ID":"913d532c-5441-4389-bfdd-ab39eb03619e","Type":"ContainerStarted","Data":"458b451823de29695c624765e275a6f8cf396a2ee9ce0ae63e057fd770b219f7"}
Jan 22 05:31:14 crc kubenswrapper[4814]: I0122 05:31:14.667788 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5cxn8" event={"ID":"913d532c-5441-4389-bfdd-ab39eb03619e","Type":"ContainerStarted","Data":"bd64cf7292818aa699f262601507241eec41ed80f9016f6cc8687ad3e9733f53"}
Jan 22 05:31:14 crc kubenswrapper[4814]: I0122 05:31:14.668817 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:14 crc kubenswrapper[4814]: I0122 05:31:14.700757 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-5cxn8" podStartSLOduration=6.543688013 podStartE2EDuration="14.700735775s" podCreationTimestamp="2026-01-22 05:31:00 +0000 UTC" firstStartedPulling="2026-01-22 05:31:01.346128201 +0000 UTC m=+747.429616416" lastFinishedPulling="2026-01-22 05:31:09.503175963 +0000 UTC m=+755.586664178" observedRunningTime="2026-01-22 05:31:14.697125523 +0000 UTC m=+760.780613768" watchObservedRunningTime="2026-01-22 05:31:14.700735775 +0000 UTC m=+760.784224000"
Jan 22 05:31:16 crc kubenswrapper[4814]: I0122 05:31:16.255404 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:16 crc kubenswrapper[4814]: I0122 05:31:16.303396 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-5cxn8"
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.089696 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-fms7f"]
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.090615 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-fms7f"
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.092053 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-pz646"
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.092650 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.092936 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.099420 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-fms7f"]
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.208376 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp458\" (UniqueName: \"kubernetes.io/projected/f9b73f1f-ed62-4a5a-877d-4db66a2d74c4-kube-api-access-fp458\") pod \"openstack-operator-index-fms7f\" (UID: \"f9b73f1f-ed62-4a5a-877d-4db66a2d74c4\") " pod="openstack-operators/openstack-operator-index-fms7f"
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.309614 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp458\" (UniqueName: \"kubernetes.io/projected/f9b73f1f-ed62-4a5a-877d-4db66a2d74c4-kube-api-access-fp458\") pod \"openstack-operator-index-fms7f\" (UID: \"f9b73f1f-ed62-4a5a-877d-4db66a2d74c4\") " pod="openstack-operators/openstack-operator-index-fms7f"
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.332995 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp458\" (UniqueName: \"kubernetes.io/projected/f9b73f1f-ed62-4a5a-877d-4db66a2d74c4-kube-api-access-fp458\") pod \"openstack-operator-index-fms7f\" (UID: \"f9b73f1f-ed62-4a5a-877d-4db66a2d74c4\") " pod="openstack-operators/openstack-operator-index-fms7f"
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.403654 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-fms7f"
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.615133 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.615490 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.615535 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg"
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.616187 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b60d5a55f7f3e7c7e151368bd532eb06ab5f80edff26a6360b765f6b4951f49e"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.616242 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://b60d5a55f7f3e7c7e151368bd532eb06ab5f80edff26a6360b765f6b4951f49e" gracePeriod=600
Jan 22 05:31:19 crc kubenswrapper[4814]: I0122 05:31:19.699078 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-fms7f"]
Jan 22 05:31:20 crc kubenswrapper[4814]: I0122 05:31:20.647676 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m"
Jan 22 05:31:20 crc kubenswrapper[4814]: I0122 05:31:20.730964 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-fms7f" event={"ID":"f9b73f1f-ed62-4a5a-877d-4db66a2d74c4","Type":"ContainerStarted","Data":"50f5c9361ba5e50cff36c8ff733b7514b77b0e4eee6ff21e80e49df5fb743a11"}
Jan 22 05:31:20 crc kubenswrapper[4814]: I0122 05:31:20.742414 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="b60d5a55f7f3e7c7e151368bd532eb06ab5f80edff26a6360b765f6b4951f49e" exitCode=0
Jan 22 05:31:20 crc kubenswrapper[4814]: I0122 05:31:20.742410 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"b60d5a55f7f3e7c7e151368bd532eb06ab5f80edff26a6360b765f6b4951f49e"}
Jan 22 05:31:20 crc kubenswrapper[4814]: I0122 05:31:20.742511 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"9500dee208774edd1316e9481891ac3158cca3bdb31ab2aefff48638b4f8e29b"}
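The machine-config-daemon sequence above shows the liveness path end to end: the HTTP probe to 127.0.0.1:8798/health fails with "connect: connection refused", the kubelet records "failed liveness probe, will be restarted", kills the container with its 600s termination grace period, and the PLEG then reports ContainerDied followed by ContainerStarted for the replacement. A minimal Go sketch of an HTTP liveness check in that spirit, where any transport error counts as a failure; this is illustrative only, not the kubelet's prober implementation:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeLiveness performs one HTTP liveness check; a transport error such as
// "connect: connection refused" or a non-2xx/3xx status is a failure.
func probeLiveness(url string) error {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("probe failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("probe failed: status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// After enough consecutive failures the kubelet kills the container
	// with its grace period (gracePeriod=600 above) and restarts it.
	if err := probeLiveness("http://127.0.0.1:8798/health"); err != nil {
		fmt.Println(err)
	}
}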
pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"9500dee208774edd1316e9481891ac3158cca3bdb31ab2aefff48638b4f8e29b"} Jan 22 05:31:20 crc kubenswrapper[4814]: I0122 05:31:20.742537 4814 scope.go:117] "RemoveContainer" containerID="9f2f025680c6fd4e90a0353a156dc3eb3a96411365552c383a9346ea5768b5f9" Jan 22 05:31:20 crc kubenswrapper[4814]: I0122 05:31:20.787465 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-n5nbc" Jan 22 05:31:23 crc kubenswrapper[4814]: I0122 05:31:23.766059 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-fms7f" event={"ID":"f9b73f1f-ed62-4a5a-877d-4db66a2d74c4","Type":"ContainerStarted","Data":"188b03404db8807dc7cd4c065fe13d53df05aed37aa44c63e66c7c35e27f6e40"} Jan 22 05:31:23 crc kubenswrapper[4814]: I0122 05:31:23.788041 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-fms7f" podStartSLOduration=1.683784903 podStartE2EDuration="4.788018517s" podCreationTimestamp="2026-01-22 05:31:19 +0000 UTC" firstStartedPulling="2026-01-22 05:31:19.706672698 +0000 UTC m=+765.790160923" lastFinishedPulling="2026-01-22 05:31:22.810906282 +0000 UTC m=+768.894394537" observedRunningTime="2026-01-22 05:31:23.784790348 +0000 UTC m=+769.868278603" watchObservedRunningTime="2026-01-22 05:31:23.788018517 +0000 UTC m=+769.871506772" Jan 22 05:31:29 crc kubenswrapper[4814]: I0122 05:31:29.403917 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-fms7f" Jan 22 05:31:29 crc kubenswrapper[4814]: I0122 05:31:29.404789 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-fms7f" Jan 22 05:31:29 crc kubenswrapper[4814]: I0122 05:31:29.439597 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-fms7f" Jan 22 05:31:29 crc kubenswrapper[4814]: I0122 05:31:29.833790 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-fms7f" Jan 22 05:31:31 crc kubenswrapper[4814]: I0122 05:31:31.259734 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-5cxn8" Jan 22 05:31:33 crc kubenswrapper[4814]: I0122 05:31:33.899546 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-r8vbv"] Jan 22 05:31:33 crc kubenswrapper[4814]: I0122 05:31:33.901497 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:33 crc kubenswrapper[4814]: I0122 05:31:33.927464 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r8vbv"] Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.014250 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04966de-2941-4cca-b241-21c2dd9bcba8-utilities\") pod \"redhat-marketplace-r8vbv\" (UID: \"a04966de-2941-4cca-b241-21c2dd9bcba8\") " pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.014735 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04966de-2941-4cca-b241-21c2dd9bcba8-catalog-content\") pod \"redhat-marketplace-r8vbv\" (UID: \"a04966de-2941-4cca-b241-21c2dd9bcba8\") " pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.015483 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qd7j\" (UniqueName: \"kubernetes.io/projected/a04966de-2941-4cca-b241-21c2dd9bcba8-kube-api-access-9qd7j\") pod \"redhat-marketplace-r8vbv\" (UID: \"a04966de-2941-4cca-b241-21c2dd9bcba8\") " pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.117313 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04966de-2941-4cca-b241-21c2dd9bcba8-utilities\") pod \"redhat-marketplace-r8vbv\" (UID: \"a04966de-2941-4cca-b241-21c2dd9bcba8\") " pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.117368 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04966de-2941-4cca-b241-21c2dd9bcba8-catalog-content\") pod \"redhat-marketplace-r8vbv\" (UID: \"a04966de-2941-4cca-b241-21c2dd9bcba8\") " pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.117414 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qd7j\" (UniqueName: \"kubernetes.io/projected/a04966de-2941-4cca-b241-21c2dd9bcba8-kube-api-access-9qd7j\") pod \"redhat-marketplace-r8vbv\" (UID: \"a04966de-2941-4cca-b241-21c2dd9bcba8\") " pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.118870 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04966de-2941-4cca-b241-21c2dd9bcba8-utilities\") pod \"redhat-marketplace-r8vbv\" (UID: \"a04966de-2941-4cca-b241-21c2dd9bcba8\") " pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.118941 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04966de-2941-4cca-b241-21c2dd9bcba8-catalog-content\") pod \"redhat-marketplace-r8vbv\" (UID: \"a04966de-2941-4cca-b241-21c2dd9bcba8\") " pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.149582 4814 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-9qd7j\" (UniqueName: \"kubernetes.io/projected/a04966de-2941-4cca-b241-21c2dd9bcba8-kube-api-access-9qd7j\") pod \"redhat-marketplace-r8vbv\" (UID: \"a04966de-2941-4cca-b241-21c2dd9bcba8\") " pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.228411 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.523184 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r8vbv"] Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.843682 4814 generic.go:334] "Generic (PLEG): container finished" podID="a04966de-2941-4cca-b241-21c2dd9bcba8" containerID="7e35bbb7b5e8612050ca36102c97c0ebcdf3ce443f78a16e4c020a3d9f445300" exitCode=0 Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.843722 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r8vbv" event={"ID":"a04966de-2941-4cca-b241-21c2dd9bcba8","Type":"ContainerDied","Data":"7e35bbb7b5e8612050ca36102c97c0ebcdf3ce443f78a16e4c020a3d9f445300"} Jan 22 05:31:34 crc kubenswrapper[4814]: I0122 05:31:34.843745 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r8vbv" event={"ID":"a04966de-2941-4cca-b241-21c2dd9bcba8","Type":"ContainerStarted","Data":"a97b69806dad784a4671c50919c7189449b827c4b91e9f29bb5ee2708a2ec12a"} Jan 22 05:31:36 crc kubenswrapper[4814]: I0122 05:31:36.866611 4814 generic.go:334] "Generic (PLEG): container finished" podID="a04966de-2941-4cca-b241-21c2dd9bcba8" containerID="ba9d5fe5399fde0df902677ecca627d0bf1025c42d95039ca33c0c109beea855" exitCode=0 Jan 22 05:31:36 crc kubenswrapper[4814]: I0122 05:31:36.867364 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r8vbv" event={"ID":"a04966de-2941-4cca-b241-21c2dd9bcba8","Type":"ContainerDied","Data":"ba9d5fe5399fde0df902677ecca627d0bf1025c42d95039ca33c0c109beea855"} Jan 22 05:31:37 crc kubenswrapper[4814]: I0122 05:31:37.874328 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r8vbv" event={"ID":"a04966de-2941-4cca-b241-21c2dd9bcba8","Type":"ContainerStarted","Data":"d63f0605eb8c8d88f3506f049ac567548007c9ee78db3271bc9867d38ca2ab6f"} Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.133109 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-r8vbv" podStartSLOduration=2.598420253 podStartE2EDuration="5.133093503s" podCreationTimestamp="2026-01-22 05:31:33 +0000 UTC" firstStartedPulling="2026-01-22 05:31:34.845152947 +0000 UTC m=+780.928641162" lastFinishedPulling="2026-01-22 05:31:37.379826147 +0000 UTC m=+783.463314412" observedRunningTime="2026-01-22 05:31:37.891174683 +0000 UTC m=+783.974662898" watchObservedRunningTime="2026-01-22 05:31:38.133093503 +0000 UTC m=+784.216581718" Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.134651 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr"] Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.135772 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.138507 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-mhx2d" Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.153839 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr"] Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.276890 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-util\") pod \"a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr\" (UID: \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\") " pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.277176 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-bundle\") pod \"a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr\" (UID: \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\") " pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.277328 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpmxw\" (UniqueName: \"kubernetes.io/projected/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-kube-api-access-gpmxw\") pod \"a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr\" (UID: \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\") " pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.377996 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpmxw\" (UniqueName: \"kubernetes.io/projected/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-kube-api-access-gpmxw\") pod \"a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr\" (UID: \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\") " pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.378058 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-util\") pod \"a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr\" (UID: \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\") " pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.378079 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-bundle\") pod \"a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr\" (UID: \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\") " pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.378539 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-bundle\") pod \"a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr\" (UID: \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\") " pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.378671 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-util\") pod \"a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr\" (UID: \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\") " pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.403378 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpmxw\" (UniqueName: \"kubernetes.io/projected/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-kube-api-access-gpmxw\") pod \"a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr\" (UID: \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\") " pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.450927 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.652403 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr"] Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.880481 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" event={"ID":"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5","Type":"ContainerStarted","Data":"13317bd1cc8dc2b242f753d3abd70cd0a1f4fd194bb484942fadb1b747505985"} Jan 22 05:31:38 crc kubenswrapper[4814]: I0122 05:31:38.880536 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" event={"ID":"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5","Type":"ContainerStarted","Data":"efe6dab2608222a8d4e265ecc6085a451866506644648f4fa48ef5f42ce952b1"} Jan 22 05:31:39 crc kubenswrapper[4814]: I0122 05:31:39.890869 4814 generic.go:334] "Generic (PLEG): container finished" podID="8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5" containerID="13317bd1cc8dc2b242f753d3abd70cd0a1f4fd194bb484942fadb1b747505985" exitCode=0 Jan 22 05:31:39 crc kubenswrapper[4814]: I0122 05:31:39.891008 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" event={"ID":"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5","Type":"ContainerDied","Data":"13317bd1cc8dc2b242f753d3abd70cd0a1f4fd194bb484942fadb1b747505985"} Jan 22 05:31:40 crc kubenswrapper[4814]: I0122 05:31:40.901992 4814 generic.go:334] "Generic (PLEG): container finished" podID="8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5" containerID="62f0f10ebb2e5f733879ab945734e515165f5ea55b4d3acd23f96b0eadabf1c5" exitCode=0 Jan 22 05:31:40 crc kubenswrapper[4814]: I0122 05:31:40.902071 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" 
event={"ID":"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5","Type":"ContainerDied","Data":"62f0f10ebb2e5f733879ab945734e515165f5ea55b4d3acd23f96b0eadabf1c5"} Jan 22 05:31:41 crc kubenswrapper[4814]: I0122 05:31:41.921199 4814 generic.go:334] "Generic (PLEG): container finished" podID="8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5" containerID="23bc3d707fc535b392cf1c1dba4620744d0db344a021eda1a1960a0dee377d11" exitCode=0 Jan 22 05:31:41 crc kubenswrapper[4814]: I0122 05:31:41.922390 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" event={"ID":"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5","Type":"ContainerDied","Data":"23bc3d707fc535b392cf1c1dba4620744d0db344a021eda1a1960a0dee377d11"} Jan 22 05:31:43 crc kubenswrapper[4814]: I0122 05:31:43.275021 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" Jan 22 05:31:43 crc kubenswrapper[4814]: I0122 05:31:43.449526 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-bundle\") pod \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\" (UID: \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\") " Jan 22 05:31:43 crc kubenswrapper[4814]: I0122 05:31:43.449722 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-util\") pod \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\" (UID: \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\") " Jan 22 05:31:43 crc kubenswrapper[4814]: I0122 05:31:43.449879 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpmxw\" (UniqueName: \"kubernetes.io/projected/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-kube-api-access-gpmxw\") pod \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\" (UID: \"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5\") " Jan 22 05:31:43 crc kubenswrapper[4814]: I0122 05:31:43.450561 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-bundle" (OuterVolumeSpecName: "bundle") pod "8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5" (UID: "8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:31:43 crc kubenswrapper[4814]: I0122 05:31:43.461843 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-kube-api-access-gpmxw" (OuterVolumeSpecName: "kube-api-access-gpmxw") pod "8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5" (UID: "8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5"). InnerVolumeSpecName "kube-api-access-gpmxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:31:43 crc kubenswrapper[4814]: I0122 05:31:43.463219 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-util" (OuterVolumeSpecName: "util") pod "8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5" (UID: "8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:31:43 crc kubenswrapper[4814]: I0122 05:31:43.552412 4814 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:31:43 crc kubenswrapper[4814]: I0122 05:31:43.552461 4814 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-util\") on node \"crc\" DevicePath \"\"" Jan 22 05:31:43 crc kubenswrapper[4814]: I0122 05:31:43.552480 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpmxw\" (UniqueName: \"kubernetes.io/projected/8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5-kube-api-access-gpmxw\") on node \"crc\" DevicePath \"\"" Jan 22 05:31:43 crc kubenswrapper[4814]: I0122 05:31:43.939902 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" event={"ID":"8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5","Type":"ContainerDied","Data":"efe6dab2608222a8d4e265ecc6085a451866506644648f4fa48ef5f42ce952b1"} Jan 22 05:31:43 crc kubenswrapper[4814]: I0122 05:31:43.939956 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="efe6dab2608222a8d4e265ecc6085a451866506644648f4fa48ef5f42ce952b1" Jan 22 05:31:43 crc kubenswrapper[4814]: I0122 05:31:43.939974 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a8cfb4579d62c7e227d272a6b248f738b3c76c2f9443d60bd934756269v6whr" Jan 22 05:31:44 crc kubenswrapper[4814]: I0122 05:31:44.229004 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:44 crc kubenswrapper[4814]: I0122 05:31:44.229388 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:44 crc kubenswrapper[4814]: I0122 05:31:44.311138 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:45 crc kubenswrapper[4814]: I0122 05:31:45.041980 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:46 crc kubenswrapper[4814]: I0122 05:31:46.082678 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r8vbv"] Jan 22 05:31:46 crc kubenswrapper[4814]: I0122 05:31:46.967557 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-r8vbv" podUID="a04966de-2941-4cca-b241-21c2dd9bcba8" containerName="registry-server" containerID="cri-o://d63f0605eb8c8d88f3506f049ac567548007c9ee78db3271bc9867d38ca2ab6f" gracePeriod=2 Jan 22 05:31:47 crc kubenswrapper[4814]: I0122 05:31:47.974191 4814 generic.go:334] "Generic (PLEG): container finished" podID="a04966de-2941-4cca-b241-21c2dd9bcba8" containerID="d63f0605eb8c8d88f3506f049ac567548007c9ee78db3271bc9867d38ca2ab6f" exitCode=0 Jan 22 05:31:47 crc kubenswrapper[4814]: I0122 05:31:47.974899 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r8vbv" event={"ID":"a04966de-2941-4cca-b241-21c2dd9bcba8","Type":"ContainerDied","Data":"d63f0605eb8c8d88f3506f049ac567548007c9ee78db3271bc9867d38ca2ab6f"} Jan 22 05:31:48 crc 
kubenswrapper[4814]: I0122 05:31:48.103027 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-557998694f-5nj7s"] Jan 22 05:31:48 crc kubenswrapper[4814]: E0122 05:31:48.103259 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5" containerName="util" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.103275 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5" containerName="util" Jan 22 05:31:48 crc kubenswrapper[4814]: E0122 05:31:48.103290 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5" containerName="extract" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.103298 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5" containerName="extract" Jan 22 05:31:48 crc kubenswrapper[4814]: E0122 05:31:48.103319 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5" containerName="pull" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.103325 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5" containerName="pull" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.103421 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a0c07b6-a86d-4a3c-af2a-a0d2f585bec5" containerName="extract" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.103773 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-557998694f-5nj7s" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.105877 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-8dcxq" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.128080 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-557998694f-5nj7s"] Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.215615 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t64p7\" (UniqueName: \"kubernetes.io/projected/23e8e8f1-5804-407e-a3c0-8f0d7d2c89e4-kube-api-access-t64p7\") pod \"openstack-operator-controller-init-557998694f-5nj7s\" (UID: \"23e8e8f1-5804-407e-a3c0-8f0d7d2c89e4\") " pod="openstack-operators/openstack-operator-controller-init-557998694f-5nj7s" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.316480 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t64p7\" (UniqueName: \"kubernetes.io/projected/23e8e8f1-5804-407e-a3c0-8f0d7d2c89e4-kube-api-access-t64p7\") pod \"openstack-operator-controller-init-557998694f-5nj7s\" (UID: \"23e8e8f1-5804-407e-a3c0-8f0d7d2c89e4\") " pod="openstack-operators/openstack-operator-controller-init-557998694f-5nj7s" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.338360 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t64p7\" (UniqueName: \"kubernetes.io/projected/23e8e8f1-5804-407e-a3c0-8f0d7d2c89e4-kube-api-access-t64p7\") pod \"openstack-operator-controller-init-557998694f-5nj7s\" (UID: \"23e8e8f1-5804-407e-a3c0-8f0d7d2c89e4\") " pod="openstack-operators/openstack-operator-controller-init-557998694f-5nj7s" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 
05:31:48.418156 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-557998694f-5nj7s" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.471950 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.621014 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qd7j\" (UniqueName: \"kubernetes.io/projected/a04966de-2941-4cca-b241-21c2dd9bcba8-kube-api-access-9qd7j\") pod \"a04966de-2941-4cca-b241-21c2dd9bcba8\" (UID: \"a04966de-2941-4cca-b241-21c2dd9bcba8\") " Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.621331 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04966de-2941-4cca-b241-21c2dd9bcba8-utilities\") pod \"a04966de-2941-4cca-b241-21c2dd9bcba8\" (UID: \"a04966de-2941-4cca-b241-21c2dd9bcba8\") " Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.621367 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04966de-2941-4cca-b241-21c2dd9bcba8-catalog-content\") pod \"a04966de-2941-4cca-b241-21c2dd9bcba8\" (UID: \"a04966de-2941-4cca-b241-21c2dd9bcba8\") " Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.624440 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a04966de-2941-4cca-b241-21c2dd9bcba8-utilities" (OuterVolumeSpecName: "utilities") pod "a04966de-2941-4cca-b241-21c2dd9bcba8" (UID: "a04966de-2941-4cca-b241-21c2dd9bcba8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.628578 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a04966de-2941-4cca-b241-21c2dd9bcba8-kube-api-access-9qd7j" (OuterVolumeSpecName: "kube-api-access-9qd7j") pod "a04966de-2941-4cca-b241-21c2dd9bcba8" (UID: "a04966de-2941-4cca-b241-21c2dd9bcba8"). InnerVolumeSpecName "kube-api-access-9qd7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.651969 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a04966de-2941-4cca-b241-21c2dd9bcba8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a04966de-2941-4cca-b241-21c2dd9bcba8" (UID: "a04966de-2941-4cca-b241-21c2dd9bcba8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.723054 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qd7j\" (UniqueName: \"kubernetes.io/projected/a04966de-2941-4cca-b241-21c2dd9bcba8-kube-api-access-9qd7j\") on node \"crc\" DevicePath \"\"" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.723086 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04966de-2941-4cca-b241-21c2dd9bcba8-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.723096 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04966de-2941-4cca-b241-21c2dd9bcba8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:31:48 crc kubenswrapper[4814]: W0122 05:31:48.904480 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod23e8e8f1_5804_407e_a3c0_8f0d7d2c89e4.slice/crio-421dd3b5582e701da1ca15f7e30583e3def4a501ebf227ce565f681e86d50cab WatchSource:0}: Error finding container 421dd3b5582e701da1ca15f7e30583e3def4a501ebf227ce565f681e86d50cab: Status 404 returned error can't find the container with id 421dd3b5582e701da1ca15f7e30583e3def4a501ebf227ce565f681e86d50cab Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.909728 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-557998694f-5nj7s"] Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.991490 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-557998694f-5nj7s" event={"ID":"23e8e8f1-5804-407e-a3c0-8f0d7d2c89e4","Type":"ContainerStarted","Data":"421dd3b5582e701da1ca15f7e30583e3def4a501ebf227ce565f681e86d50cab"} Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.995448 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r8vbv" event={"ID":"a04966de-2941-4cca-b241-21c2dd9bcba8","Type":"ContainerDied","Data":"a97b69806dad784a4671c50919c7189449b827c4b91e9f29bb5ee2708a2ec12a"} Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.995507 4814 scope.go:117] "RemoveContainer" containerID="d63f0605eb8c8d88f3506f049ac567548007c9ee78db3271bc9867d38ca2ab6f" Jan 22 05:31:48 crc kubenswrapper[4814]: I0122 05:31:48.995652 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r8vbv" Jan 22 05:31:49 crc kubenswrapper[4814]: I0122 05:31:49.034831 4814 scope.go:117] "RemoveContainer" containerID="ba9d5fe5399fde0df902677ecca627d0bf1025c42d95039ca33c0c109beea855" Jan 22 05:31:49 crc kubenswrapper[4814]: I0122 05:31:49.038513 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r8vbv"] Jan 22 05:31:49 crc kubenswrapper[4814]: I0122 05:31:49.044436 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-r8vbv"] Jan 22 05:31:49 crc kubenswrapper[4814]: I0122 05:31:49.049906 4814 scope.go:117] "RemoveContainer" containerID="7e35bbb7b5e8612050ca36102c97c0ebcdf3ce443f78a16e4c020a3d9f445300" Jan 22 05:31:50 crc kubenswrapper[4814]: I0122 05:31:50.352733 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a04966de-2941-4cca-b241-21c2dd9bcba8" path="/var/lib/kubelet/pods/a04966de-2941-4cca-b241-21c2dd9bcba8/volumes" Jan 22 05:31:54 crc kubenswrapper[4814]: I0122 05:31:54.040360 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-557998694f-5nj7s" event={"ID":"23e8e8f1-5804-407e-a3c0-8f0d7d2c89e4","Type":"ContainerStarted","Data":"8210ea9435aeef7c6536d49c2169c3b8914fc266bb2444f84aba7971cced2bde"} Jan 22 05:31:54 crc kubenswrapper[4814]: I0122 05:31:54.042248 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-557998694f-5nj7s" Jan 22 05:31:54 crc kubenswrapper[4814]: I0122 05:31:54.071290 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-557998694f-5nj7s" podStartSLOduration=2.004835013 podStartE2EDuration="6.071270667s" podCreationTimestamp="2026-01-22 05:31:48 +0000 UTC" firstStartedPulling="2026-01-22 05:31:48.907083302 +0000 UTC m=+794.990571557" lastFinishedPulling="2026-01-22 05:31:52.973518996 +0000 UTC m=+799.057007211" observedRunningTime="2026-01-22 05:31:54.067293644 +0000 UTC m=+800.150781879" watchObservedRunningTime="2026-01-22 05:31:54.071270667 +0000 UTC m=+800.154758892" Jan 22 05:31:58 crc kubenswrapper[4814]: I0122 05:31:58.421230 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-557998694f-5nj7s" Jan 22 05:32:03 crc kubenswrapper[4814]: I0122 05:32:03.809110 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-s2fwc"] Jan 22 05:32:03 crc kubenswrapper[4814]: E0122 05:32:03.809821 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a04966de-2941-4cca-b241-21c2dd9bcba8" containerName="registry-server" Jan 22 05:32:03 crc kubenswrapper[4814]: I0122 05:32:03.809843 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a04966de-2941-4cca-b241-21c2dd9bcba8" containerName="registry-server" Jan 22 05:32:03 crc kubenswrapper[4814]: E0122 05:32:03.809863 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a04966de-2941-4cca-b241-21c2dd9bcba8" containerName="extract-content" Jan 22 05:32:03 crc kubenswrapper[4814]: I0122 05:32:03.809875 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a04966de-2941-4cca-b241-21c2dd9bcba8" containerName="extract-content" Jan 22 05:32:03 crc kubenswrapper[4814]: E0122 05:32:03.809897 4814 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="a04966de-2941-4cca-b241-21c2dd9bcba8" containerName="extract-utilities" Jan 22 05:32:03 crc kubenswrapper[4814]: I0122 05:32:03.809909 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a04966de-2941-4cca-b241-21c2dd9bcba8" containerName="extract-utilities" Jan 22 05:32:03 crc kubenswrapper[4814]: I0122 05:32:03.810112 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a04966de-2941-4cca-b241-21c2dd9bcba8" containerName="registry-server" Jan 22 05:32:03 crc kubenswrapper[4814]: I0122 05:32:03.811541 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:03 crc kubenswrapper[4814]: I0122 05:32:03.839310 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s2fwc"] Jan 22 05:32:03 crc kubenswrapper[4814]: I0122 05:32:03.940019 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/905e44b2-de51-4139-a5aa-2a3ccb22e017-catalog-content\") pod \"redhat-operators-s2fwc\" (UID: \"905e44b2-de51-4139-a5aa-2a3ccb22e017\") " pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:03 crc kubenswrapper[4814]: I0122 05:32:03.940364 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5zcq\" (UniqueName: \"kubernetes.io/projected/905e44b2-de51-4139-a5aa-2a3ccb22e017-kube-api-access-n5zcq\") pod \"redhat-operators-s2fwc\" (UID: \"905e44b2-de51-4139-a5aa-2a3ccb22e017\") " pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:03 crc kubenswrapper[4814]: I0122 05:32:03.940438 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/905e44b2-de51-4139-a5aa-2a3ccb22e017-utilities\") pod \"redhat-operators-s2fwc\" (UID: \"905e44b2-de51-4139-a5aa-2a3ccb22e017\") " pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:04 crc kubenswrapper[4814]: I0122 05:32:04.041674 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/905e44b2-de51-4139-a5aa-2a3ccb22e017-catalog-content\") pod \"redhat-operators-s2fwc\" (UID: \"905e44b2-de51-4139-a5aa-2a3ccb22e017\") " pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:04 crc kubenswrapper[4814]: I0122 05:32:04.041750 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5zcq\" (UniqueName: \"kubernetes.io/projected/905e44b2-de51-4139-a5aa-2a3ccb22e017-kube-api-access-n5zcq\") pod \"redhat-operators-s2fwc\" (UID: \"905e44b2-de51-4139-a5aa-2a3ccb22e017\") " pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:04 crc kubenswrapper[4814]: I0122 05:32:04.041820 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/905e44b2-de51-4139-a5aa-2a3ccb22e017-utilities\") pod \"redhat-operators-s2fwc\" (UID: \"905e44b2-de51-4139-a5aa-2a3ccb22e017\") " pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:04 crc kubenswrapper[4814]: I0122 05:32:04.042273 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/905e44b2-de51-4139-a5aa-2a3ccb22e017-catalog-content\") pod \"redhat-operators-s2fwc\" (UID: 
\"905e44b2-de51-4139-a5aa-2a3ccb22e017\") " pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:04 crc kubenswrapper[4814]: I0122 05:32:04.042330 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/905e44b2-de51-4139-a5aa-2a3ccb22e017-utilities\") pod \"redhat-operators-s2fwc\" (UID: \"905e44b2-de51-4139-a5aa-2a3ccb22e017\") " pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:04 crc kubenswrapper[4814]: I0122 05:32:04.059932 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5zcq\" (UniqueName: \"kubernetes.io/projected/905e44b2-de51-4139-a5aa-2a3ccb22e017-kube-api-access-n5zcq\") pod \"redhat-operators-s2fwc\" (UID: \"905e44b2-de51-4139-a5aa-2a3ccb22e017\") " pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:04 crc kubenswrapper[4814]: I0122 05:32:04.139420 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:04 crc kubenswrapper[4814]: I0122 05:32:04.394087 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s2fwc"] Jan 22 05:32:05 crc kubenswrapper[4814]: I0122 05:32:05.112725 4814 generic.go:334] "Generic (PLEG): container finished" podID="905e44b2-de51-4139-a5aa-2a3ccb22e017" containerID="cebcf89081eac50596ca0ee9c5f4783350668f3170eb51f0ed5f52a81ff9f079" exitCode=0 Jan 22 05:32:05 crc kubenswrapper[4814]: I0122 05:32:05.112776 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2fwc" event={"ID":"905e44b2-de51-4139-a5aa-2a3ccb22e017","Type":"ContainerDied","Data":"cebcf89081eac50596ca0ee9c5f4783350668f3170eb51f0ed5f52a81ff9f079"} Jan 22 05:32:05 crc kubenswrapper[4814]: I0122 05:32:05.112810 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2fwc" event={"ID":"905e44b2-de51-4139-a5aa-2a3ccb22e017","Type":"ContainerStarted","Data":"4d0fde46cabbc430543351f8cb2bd7433c77a1e3b51586acaf504c6da2df9d5c"} Jan 22 05:32:07 crc kubenswrapper[4814]: I0122 05:32:07.130061 4814 generic.go:334] "Generic (PLEG): container finished" podID="905e44b2-de51-4139-a5aa-2a3ccb22e017" containerID="76e63393de9a99b2032b9d73c61e4522ee980ac175fb0b4fb9ec03c0883e7289" exitCode=0 Jan 22 05:32:07 crc kubenswrapper[4814]: I0122 05:32:07.130129 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2fwc" event={"ID":"905e44b2-de51-4139-a5aa-2a3ccb22e017","Type":"ContainerDied","Data":"76e63393de9a99b2032b9d73c61e4522ee980ac175fb0b4fb9ec03c0883e7289"} Jan 22 05:32:08 crc kubenswrapper[4814]: I0122 05:32:08.141242 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2fwc" event={"ID":"905e44b2-de51-4139-a5aa-2a3ccb22e017","Type":"ContainerStarted","Data":"f37f09f3e4bb2cb64a1407d47381ac17e8791854ad82ce907b39fd6c508d6348"} Jan 22 05:32:08 crc kubenswrapper[4814]: I0122 05:32:08.175358 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-s2fwc" podStartSLOduration=2.771012545 podStartE2EDuration="5.175336939s" podCreationTimestamp="2026-01-22 05:32:03 +0000 UTC" firstStartedPulling="2026-01-22 05:32:05.114562115 +0000 UTC m=+811.198050330" lastFinishedPulling="2026-01-22 05:32:07.518886469 +0000 UTC m=+813.602374724" observedRunningTime="2026-01-22 05:32:08.171770409 +0000 UTC m=+814.255258654" 
watchObservedRunningTime="2026-01-22 05:32:08.175336939 +0000 UTC m=+814.258825184" Jan 22 05:32:14 crc kubenswrapper[4814]: I0122 05:32:14.139679 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:14 crc kubenswrapper[4814]: I0122 05:32:14.141828 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:15 crc kubenswrapper[4814]: I0122 05:32:15.201598 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-s2fwc" podUID="905e44b2-de51-4139-a5aa-2a3ccb22e017" containerName="registry-server" probeResult="failure" output=< Jan 22 05:32:15 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 05:32:15 crc kubenswrapper[4814]: > Jan 22 05:32:24 crc kubenswrapper[4814]: I0122 05:32:24.218680 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:24 crc kubenswrapper[4814]: I0122 05:32:24.297200 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:24 crc kubenswrapper[4814]: I0122 05:32:24.448675 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s2fwc"] Jan 22 05:32:25 crc kubenswrapper[4814]: I0122 05:32:25.264453 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-s2fwc" podUID="905e44b2-de51-4139-a5aa-2a3ccb22e017" containerName="registry-server" containerID="cri-o://f37f09f3e4bb2cb64a1407d47381ac17e8791854ad82ce907b39fd6c508d6348" gracePeriod=2 Jan 22 05:32:27 crc kubenswrapper[4814]: I0122 05:32:27.276572 4814 generic.go:334] "Generic (PLEG): container finished" podID="905e44b2-de51-4139-a5aa-2a3ccb22e017" containerID="f37f09f3e4bb2cb64a1407d47381ac17e8791854ad82ce907b39fd6c508d6348" exitCode=0 Jan 22 05:32:27 crc kubenswrapper[4814]: I0122 05:32:27.276670 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2fwc" event={"ID":"905e44b2-de51-4139-a5aa-2a3ccb22e017","Type":"ContainerDied","Data":"f37f09f3e4bb2cb64a1407d47381ac17e8791854ad82ce907b39fd6c508d6348"} Jan 22 05:32:27 crc kubenswrapper[4814]: I0122 05:32:27.682170 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:27 crc kubenswrapper[4814]: I0122 05:32:27.807415 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/905e44b2-de51-4139-a5aa-2a3ccb22e017-catalog-content\") pod \"905e44b2-de51-4139-a5aa-2a3ccb22e017\" (UID: \"905e44b2-de51-4139-a5aa-2a3ccb22e017\") " Jan 22 05:32:27 crc kubenswrapper[4814]: I0122 05:32:27.807481 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n5zcq\" (UniqueName: \"kubernetes.io/projected/905e44b2-de51-4139-a5aa-2a3ccb22e017-kube-api-access-n5zcq\") pod \"905e44b2-de51-4139-a5aa-2a3ccb22e017\" (UID: \"905e44b2-de51-4139-a5aa-2a3ccb22e017\") " Jan 22 05:32:27 crc kubenswrapper[4814]: I0122 05:32:27.807519 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/905e44b2-de51-4139-a5aa-2a3ccb22e017-utilities\") pod \"905e44b2-de51-4139-a5aa-2a3ccb22e017\" (UID: \"905e44b2-de51-4139-a5aa-2a3ccb22e017\") " Jan 22 05:32:27 crc kubenswrapper[4814]: I0122 05:32:27.808319 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/905e44b2-de51-4139-a5aa-2a3ccb22e017-utilities" (OuterVolumeSpecName: "utilities") pod "905e44b2-de51-4139-a5aa-2a3ccb22e017" (UID: "905e44b2-de51-4139-a5aa-2a3ccb22e017"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:32:27 crc kubenswrapper[4814]: I0122 05:32:27.813741 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/905e44b2-de51-4139-a5aa-2a3ccb22e017-kube-api-access-n5zcq" (OuterVolumeSpecName: "kube-api-access-n5zcq") pod "905e44b2-de51-4139-a5aa-2a3ccb22e017" (UID: "905e44b2-de51-4139-a5aa-2a3ccb22e017"). InnerVolumeSpecName "kube-api-access-n5zcq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:32:27 crc kubenswrapper[4814]: I0122 05:32:27.908624 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/905e44b2-de51-4139-a5aa-2a3ccb22e017-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:32:27 crc kubenswrapper[4814]: I0122 05:32:27.908671 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n5zcq\" (UniqueName: \"kubernetes.io/projected/905e44b2-de51-4139-a5aa-2a3ccb22e017-kube-api-access-n5zcq\") on node \"crc\" DevicePath \"\"" Jan 22 05:32:27 crc kubenswrapper[4814]: I0122 05:32:27.911194 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/905e44b2-de51-4139-a5aa-2a3ccb22e017-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "905e44b2-de51-4139-a5aa-2a3ccb22e017" (UID: "905e44b2-de51-4139-a5aa-2a3ccb22e017"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:32:28 crc kubenswrapper[4814]: I0122 05:32:28.010206 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/905e44b2-de51-4139-a5aa-2a3ccb22e017-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:32:28 crc kubenswrapper[4814]: I0122 05:32:28.288605 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s2fwc" event={"ID":"905e44b2-de51-4139-a5aa-2a3ccb22e017","Type":"ContainerDied","Data":"4d0fde46cabbc430543351f8cb2bd7433c77a1e3b51586acaf504c6da2df9d5c"} Jan 22 05:32:28 crc kubenswrapper[4814]: I0122 05:32:28.288710 4814 scope.go:117] "RemoveContainer" containerID="f37f09f3e4bb2cb64a1407d47381ac17e8791854ad82ce907b39fd6c508d6348" Jan 22 05:32:28 crc kubenswrapper[4814]: I0122 05:32:28.288725 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s2fwc" Jan 22 05:32:28 crc kubenswrapper[4814]: I0122 05:32:28.330743 4814 scope.go:117] "RemoveContainer" containerID="76e63393de9a99b2032b9d73c61e4522ee980ac175fb0b4fb9ec03c0883e7289" Jan 22 05:32:28 crc kubenswrapper[4814]: I0122 05:32:28.342238 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s2fwc"] Jan 22 05:32:28 crc kubenswrapper[4814]: I0122 05:32:28.358315 4814 scope.go:117] "RemoveContainer" containerID="cebcf89081eac50596ca0ee9c5f4783350668f3170eb51f0ed5f52a81ff9f079" Jan 22 05:32:28 crc kubenswrapper[4814]: I0122 05:32:28.365837 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-s2fwc"] Jan 22 05:32:30 crc kubenswrapper[4814]: I0122 05:32:30.351532 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="905e44b2-de51-4139-a5aa-2a3ccb22e017" path="/var/lib/kubelet/pods/905e44b2-de51-4139-a5aa-2a3ccb22e017/volumes" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.455791 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wl699"] Jan 22 05:32:38 crc kubenswrapper[4814]: E0122 05:32:38.456366 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="905e44b2-de51-4139-a5aa-2a3ccb22e017" containerName="extract-utilities" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.456377 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="905e44b2-de51-4139-a5aa-2a3ccb22e017" containerName="extract-utilities" Jan 22 05:32:38 crc kubenswrapper[4814]: E0122 05:32:38.456391 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="905e44b2-de51-4139-a5aa-2a3ccb22e017" containerName="extract-content" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.456397 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="905e44b2-de51-4139-a5aa-2a3ccb22e017" containerName="extract-content" Jan 22 05:32:38 crc kubenswrapper[4814]: E0122 05:32:38.456415 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="905e44b2-de51-4139-a5aa-2a3ccb22e017" containerName="registry-server" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.456420 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="905e44b2-de51-4139-a5aa-2a3ccb22e017" containerName="registry-server" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.456528 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="905e44b2-de51-4139-a5aa-2a3ccb22e017" 
containerName="registry-server" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.456866 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wl699" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.461017 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-vrv59"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.461475 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-vrv59" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.461975 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-wzl2p" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.467295 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-2547t" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.477299 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-mtt48"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.491384 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mtt48" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.497719 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-5ggvl" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.500009 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-vrv59"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.525942 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gb4wg\" (UniqueName: \"kubernetes.io/projected/03b37176-5f03-44da-b6e5-1d1364483db3-kube-api-access-gb4wg\") pod \"barbican-operator-controller-manager-59dd8b7cbf-wl699\" (UID: \"03b37176-5f03-44da-b6e5-1d1364483db3\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wl699" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.526002 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkp8s\" (UniqueName: \"kubernetes.io/projected/7a390991-5dac-4dd5-8afb-996222205b63-kube-api-access-kkp8s\") pod \"designate-operator-controller-manager-b45d7bf98-mtt48\" (UID: \"7a390991-5dac-4dd5-8afb-996222205b63\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mtt48" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.526040 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kzr6\" (UniqueName: \"kubernetes.io/projected/f07d8bf3-82b4-4d63-982b-e8e423ee422b-kube-api-access-4kzr6\") pod \"cinder-operator-controller-manager-69cf5d4557-vrv59\" (UID: \"f07d8bf3-82b4-4d63-982b-e8e423ee422b\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-vrv59" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.536597 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-mtt48"] Jan 22 05:32:38 crc 
kubenswrapper[4814]: I0122 05:32:38.560513 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wl699"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.573248 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-8wkcr"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.573938 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-8wkcr" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.576539 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-hrgvk" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.579328 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.579864 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.583373 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-x8pbt" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.598854 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-8wkcr"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.609596 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-d65cp"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.610389 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-d65cp" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.614138 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-gx4td" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.626914 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gb4wg\" (UniqueName: \"kubernetes.io/projected/03b37176-5f03-44da-b6e5-1d1364483db3-kube-api-access-gb4wg\") pod \"barbican-operator-controller-manager-59dd8b7cbf-wl699\" (UID: \"03b37176-5f03-44da-b6e5-1d1364483db3\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wl699" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.626969 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkp8s\" (UniqueName: \"kubernetes.io/projected/7a390991-5dac-4dd5-8afb-996222205b63-kube-api-access-kkp8s\") pod \"designate-operator-controller-manager-b45d7bf98-mtt48\" (UID: \"7a390991-5dac-4dd5-8afb-996222205b63\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mtt48" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.627001 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kzr6\" (UniqueName: \"kubernetes.io/projected/f07d8bf3-82b4-4d63-982b-e8e423ee422b-kube-api-access-4kzr6\") pod \"cinder-operator-controller-manager-69cf5d4557-vrv59\" (UID: \"f07d8bf3-82b4-4d63-982b-e8e423ee422b\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-vrv59" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.627026 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-859hm\" (UniqueName: \"kubernetes.io/projected/44e4f657-acdf-4258-abf1-ae2dc3e6efd3-kube-api-access-859hm\") pod \"glance-operator-controller-manager-78fdd796fd-8wkcr\" (UID: \"44e4f657-acdf-4258-abf1-ae2dc3e6efd3\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-8wkcr" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.627043 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvxmb\" (UniqueName: \"kubernetes.io/projected/e48491a3-9c69-4ebf-a97f-b6226e2b91ae-kube-api-access-pvxmb\") pod \"horizon-operator-controller-manager-77d5c5b54f-d65cp\" (UID: \"e48491a3-9c69-4ebf-a97f-b6226e2b91ae\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-d65cp" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.627072 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htpj6\" (UniqueName: \"kubernetes.io/projected/51023e61-a082-4097-8581-1451d02ef61a-kube-api-access-htpj6\") pod \"heat-operator-controller-manager-594c8c9d5d-nmspv\" (UID: \"51023e61-a082-4097-8581-1451d02ef61a\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.649621 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.668899 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-d65cp"] Jan 22 05:32:38 crc 
kubenswrapper[4814]: I0122 05:32:38.671153 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gb4wg\" (UniqueName: \"kubernetes.io/projected/03b37176-5f03-44da-b6e5-1d1364483db3-kube-api-access-gb4wg\") pod \"barbican-operator-controller-manager-59dd8b7cbf-wl699\" (UID: \"03b37176-5f03-44da-b6e5-1d1364483db3\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wl699" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.673973 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kzr6\" (UniqueName: \"kubernetes.io/projected/f07d8bf3-82b4-4d63-982b-e8e423ee422b-kube-api-access-4kzr6\") pod \"cinder-operator-controller-manager-69cf5d4557-vrv59\" (UID: \"f07d8bf3-82b4-4d63-982b-e8e423ee422b\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-vrv59" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.680996 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkp8s\" (UniqueName: \"kubernetes.io/projected/7a390991-5dac-4dd5-8afb-996222205b63-kube-api-access-kkp8s\") pod \"designate-operator-controller-manager-b45d7bf98-mtt48\" (UID: \"7a390991-5dac-4dd5-8afb-996222205b63\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mtt48" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.692142 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.692963 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.696798 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-t4qkw" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.696982 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.703134 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.712507 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.713255 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.715753 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.716207 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-bccnx" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.716585 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.728218 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-859hm\" (UniqueName: \"kubernetes.io/projected/44e4f657-acdf-4258-abf1-ae2dc3e6efd3-kube-api-access-859hm\") pod \"glance-operator-controller-manager-78fdd796fd-8wkcr\" (UID: \"44e4f657-acdf-4258-abf1-ae2dc3e6efd3\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-8wkcr" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.728265 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvxmb\" (UniqueName: \"kubernetes.io/projected/e48491a3-9c69-4ebf-a97f-b6226e2b91ae-kube-api-access-pvxmb\") pod \"horizon-operator-controller-manager-77d5c5b54f-d65cp\" (UID: \"e48491a3-9c69-4ebf-a97f-b6226e2b91ae\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-d65cp" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.728300 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htpj6\" (UniqueName: \"kubernetes.io/projected/51023e61-a082-4097-8581-1451d02ef61a-kube-api-access-htpj6\") pod \"heat-operator-controller-manager-594c8c9d5d-nmspv\" (UID: \"51023e61-a082-4097-8581-1451d02ef61a\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.733083 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-d95tg" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.734325 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.746819 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.763561 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htpj6\" (UniqueName: \"kubernetes.io/projected/51023e61-a082-4097-8581-1451d02ef61a-kube-api-access-htpj6\") pod \"heat-operator-controller-manager-594c8c9d5d-nmspv\" (UID: \"51023e61-a082-4097-8581-1451d02ef61a\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.770353 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvxmb\" (UniqueName: \"kubernetes.io/projected/e48491a3-9c69-4ebf-a97f-b6226e2b91ae-kube-api-access-pvxmb\") pod \"horizon-operator-controller-manager-77d5c5b54f-d65cp\" (UID: \"e48491a3-9c69-4ebf-a97f-b6226e2b91ae\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-d65cp" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.771102 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-859hm\" (UniqueName: \"kubernetes.io/projected/44e4f657-acdf-4258-abf1-ae2dc3e6efd3-kube-api-access-859hm\") pod \"glance-operator-controller-manager-78fdd796fd-8wkcr\" (UID: \"44e4f657-acdf-4258-abf1-ae2dc3e6efd3\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-8wkcr" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.792866 4814 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.793676 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.796755 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-4mrbq" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.810750 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wl699" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.814938 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.840227 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-vrv59" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.842595 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-27czn\" (UID: \"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.842653 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69knj\" (UniqueName: \"kubernetes.io/projected/9b9196d5-18b7-4426-9506-ed3278b49437-kube-api-access-69knj\") pod \"ironic-operator-controller-manager-69d6c9f5b8-28z6x\" (UID: \"9b9196d5-18b7-4426-9506-ed3278b49437\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.842703 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxlbx\" (UniqueName: \"kubernetes.io/projected/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-kube-api-access-cxlbx\") pod \"infra-operator-controller-manager-54ccf4f85d-27czn\" (UID: \"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.842726 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnw4g\" (UniqueName: \"kubernetes.io/projected/668a0682-ff7d-4141-9b7b-1a6fd8f6eb28-kube-api-access-nnw4g\") pod \"keystone-operator-controller-manager-b8b6d4659-4csc6\" (UID: \"668a0682-ff7d-4141-9b7b-1a6fd8f6eb28\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.842742 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hgdq\" (UniqueName: \"kubernetes.io/projected/34ce6a30-f048-4c7c-b02a-00a5409379d0-kube-api-access-2hgdq\") pod \"manila-operator-controller-manager-78c6999f6f-hc6fj\" (UID: \"34ce6a30-f048-4c7c-b02a-00a5409379d0\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.842885 4814 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mtt48" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.852886 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-zlp6t"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.853661 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-zlp6t" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.863705 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5d8f59fb49-lr89d"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.864780 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-lr89d" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.869539 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-jtg42" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.873117 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.873388 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-k9sjf" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.874183 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.891489 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-4gpg8" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.891728 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-zlp6t"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.903717 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.904039 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-8wkcr" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.907323 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.945106 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5d8f59fb49-lr89d"] Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.945779 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnw4g\" (UniqueName: \"kubernetes.io/projected/668a0682-ff7d-4141-9b7b-1a6fd8f6eb28-kube-api-access-nnw4g\") pod \"keystone-operator-controller-manager-b8b6d4659-4csc6\" (UID: \"668a0682-ff7d-4141-9b7b-1a6fd8f6eb28\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.945805 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hgdq\" (UniqueName: \"kubernetes.io/projected/34ce6a30-f048-4c7c-b02a-00a5409379d0-kube-api-access-2hgdq\") pod \"manila-operator-controller-manager-78c6999f6f-hc6fj\" (UID: \"34ce6a30-f048-4c7c-b02a-00a5409379d0\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.945848 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-27czn\" (UID: \"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.945887 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69knj\" (UniqueName: \"kubernetes.io/projected/9b9196d5-18b7-4426-9506-ed3278b49437-kube-api-access-69knj\") pod \"ironic-operator-controller-manager-69d6c9f5b8-28z6x\" (UID: \"9b9196d5-18b7-4426-9506-ed3278b49437\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.945934 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxlbx\" (UniqueName: \"kubernetes.io/projected/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-kube-api-access-cxlbx\") pod \"infra-operator-controller-manager-54ccf4f85d-27czn\" (UID: \"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.962764 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-d65cp" Jan 22 05:32:38 crc kubenswrapper[4814]: E0122 05:32:38.963886 4814 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 05:32:38 crc kubenswrapper[4814]: E0122 05:32:38.963945 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert podName:2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a nodeName:}" failed. No retries permitted until 2026-01-22 05:32:39.463922836 +0000 UTC m=+845.547411051 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert") pod "infra-operator-controller-manager-54ccf4f85d-27czn" (UID: "2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a") : secret "infra-operator-webhook-server-cert" not found Jan 22 05:32:38 crc kubenswrapper[4814]: I0122 05:32:38.990138 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:38.995599 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.008551 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-x564h" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.018706 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hgdq\" (UniqueName: \"kubernetes.io/projected/34ce6a30-f048-4c7c-b02a-00a5409379d0-kube-api-access-2hgdq\") pod \"manila-operator-controller-manager-78c6999f6f-hc6fj\" (UID: \"34ce6a30-f048-4c7c-b02a-00a5409379d0\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.018709 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.019877 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.031720 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-5rnc2" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.045927 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxlbx\" (UniqueName: \"kubernetes.io/projected/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-kube-api-access-cxlbx\") pod \"infra-operator-controller-manager-54ccf4f85d-27czn\" (UID: \"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.046510 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgbg7\" (UniqueName: \"kubernetes.io/projected/4c31a125-9a4c-46fc-99eb-cf5b563d5342-kube-api-access-bgbg7\") pod \"mariadb-operator-controller-manager-c87fff755-zlp6t\" (UID: \"4c31a125-9a4c-46fc-99eb-cf5b563d5342\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-zlp6t" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.046577 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmj47\" (UniqueName: \"kubernetes.io/projected/c71d95dc-90cc-4d59-85b1-5e43c670c034-kube-api-access-nmj47\") pod \"nova-operator-controller-manager-6b8bc8d87d-ht9kj\" (UID: \"c71d95dc-90cc-4d59-85b1-5e43c670c034\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.046596 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgzjx\" 
(UniqueName: \"kubernetes.io/projected/deb943fe-dd15-4874-a5cb-0f6ce4b2c291-kube-api-access-cgzjx\") pod \"neutron-operator-controller-manager-5d8f59fb49-lr89d\" (UID: \"deb943fe-dd15-4874-a5cb-0f6ce4b2c291\") " pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-lr89d" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.047252 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69knj\" (UniqueName: \"kubernetes.io/projected/9b9196d5-18b7-4426-9506-ed3278b49437-kube-api-access-69knj\") pod \"ironic-operator-controller-manager-69d6c9f5b8-28z6x\" (UID: \"9b9196d5-18b7-4426-9506-ed3278b49437\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.048808 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.057438 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnw4g\" (UniqueName: \"kubernetes.io/projected/668a0682-ff7d-4141-9b7b-1a6fd8f6eb28-kube-api-access-nnw4g\") pod \"keystone-operator-controller-manager-b8b6d4659-4csc6\" (UID: \"668a0682-ff7d-4141-9b7b-1a6fd8f6eb28\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.057722 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.111550 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.144277 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.148585 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j766v\" (UniqueName: \"kubernetes.io/projected/10ebf9ef-4973-4395-8b4d-d916626341ee-kube-api-access-j766v\") pod \"octavia-operator-controller-manager-7bd9774b6-qr7k2\" (UID: \"10ebf9ef-4973-4395-8b4d-d916626341ee\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.148987 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgbg7\" (UniqueName: \"kubernetes.io/projected/4c31a125-9a4c-46fc-99eb-cf5b563d5342-kube-api-access-bgbg7\") pod \"mariadb-operator-controller-manager-c87fff755-zlp6t\" (UID: \"4c31a125-9a4c-46fc-99eb-cf5b563d5342\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-zlp6t" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.149053 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vwl4\" (UniqueName: \"kubernetes.io/projected/a1db7589-a69f-44a1-8b2c-44ea0c49366c-kube-api-access-6vwl4\") pod \"ovn-operator-controller-manager-55db956ddc-pzbz4\" (UID: \"a1db7589-a69f-44a1-8b2c-44ea0c49366c\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.149071 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmj47\" (UniqueName: \"kubernetes.io/projected/c71d95dc-90cc-4d59-85b1-5e43c670c034-kube-api-access-nmj47\") pod \"nova-operator-controller-manager-6b8bc8d87d-ht9kj\" (UID: \"c71d95dc-90cc-4d59-85b1-5e43c670c034\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.149094 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgzjx\" (UniqueName: \"kubernetes.io/projected/deb943fe-dd15-4874-a5cb-0f6ce4b2c291-kube-api-access-cgzjx\") pod \"neutron-operator-controller-manager-5d8f59fb49-lr89d\" (UID: \"deb943fe-dd15-4874-a5cb-0f6ce4b2c291\") " pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-lr89d" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.152742 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.188293 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgzjx\" (UniqueName: \"kubernetes.io/projected/deb943fe-dd15-4874-a5cb-0f6ce4b2c291-kube-api-access-cgzjx\") pod \"neutron-operator-controller-manager-5d8f59fb49-lr89d\" (UID: \"deb943fe-dd15-4874-a5cb-0f6ce4b2c291\") " pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-lr89d" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.205206 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-lr89d" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.212137 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgbg7\" (UniqueName: \"kubernetes.io/projected/4c31a125-9a4c-46fc-99eb-cf5b563d5342-kube-api-access-bgbg7\") pod \"mariadb-operator-controller-manager-c87fff755-zlp6t\" (UID: \"4c31a125-9a4c-46fc-99eb-cf5b563d5342\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-zlp6t" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.230689 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.231533 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.232774 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmj47\" (UniqueName: \"kubernetes.io/projected/c71d95dc-90cc-4d59-85b1-5e43c670c034-kube-api-access-nmj47\") pod \"nova-operator-controller-manager-6b8bc8d87d-ht9kj\" (UID: \"c71d95dc-90cc-4d59-85b1-5e43c670c034\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.245200 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.245435 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-ds7wf" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.252002 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j766v\" (UniqueName: \"kubernetes.io/projected/10ebf9ef-4973-4395-8b4d-d916626341ee-kube-api-access-j766v\") pod \"octavia-operator-controller-manager-7bd9774b6-qr7k2\" (UID: \"10ebf9ef-4973-4395-8b4d-d916626341ee\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.252089 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vwl4\" (UniqueName: \"kubernetes.io/projected/a1db7589-a69f-44a1-8b2c-44ea0c49366c-kube-api-access-6vwl4\") pod \"ovn-operator-controller-manager-55db956ddc-pzbz4\" (UID: \"a1db7589-a69f-44a1-8b2c-44ea0c49366c\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.256851 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.257790 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.264350 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-5gdv2" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.298801 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vwl4\" (UniqueName: \"kubernetes.io/projected/a1db7589-a69f-44a1-8b2c-44ea0c49366c-kube-api-access-6vwl4\") pod \"ovn-operator-controller-manager-55db956ddc-pzbz4\" (UID: \"a1db7589-a69f-44a1-8b2c-44ea0c49366c\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.312343 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j766v\" (UniqueName: \"kubernetes.io/projected/10ebf9ef-4973-4395-8b4d-d916626341ee-kube-api-access-j766v\") pod \"octavia-operator-controller-manager-7bd9774b6-qr7k2\" (UID: \"10ebf9ef-4973-4395-8b4d-d916626341ee\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.312421 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.317488 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.327092 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.327887 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.333581 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-4hpnd" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.363136 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.365088 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.366275 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svmq9\" (UniqueName: \"kubernetes.io/projected/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-kube-api-access-svmq9\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85446lnr\" (UID: \"e0bcc5d5-4473-4c8b-864a-ff88a0f75595\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.366365 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xw7tv\" (UniqueName: \"kubernetes.io/projected/dd47b70e-0c66-4e11-9626-116a83513318-kube-api-access-xw7tv\") pod \"placement-operator-controller-manager-5d646b7d76-v45zc\" (UID: \"dd47b70e-0c66-4e11-9626-116a83513318\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.366411 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85446lnr\" (UID: \"e0bcc5d5-4473-4c8b-864a-ff88a0f75595\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.390706 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-2g9rq" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.390863 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.403417 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.404042 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.404795 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.409884 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-fwhbr" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.412107 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.420749 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.445871 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.446734 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.464109 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-xfdfh" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.469872 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-27czn\" (UID: \"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.470150 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7g4b\" (UniqueName: \"kubernetes.io/projected/90b0a17b-e8e5-4ca4-9321-9a1980e27d70-kube-api-access-r7g4b\") pod \"swift-operator-controller-manager-547cbdb99f-czd57\" (UID: \"90b0a17b-e8e5-4ca4-9321-9a1980e27d70\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.470188 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xw7tv\" (UniqueName: \"kubernetes.io/projected/dd47b70e-0c66-4e11-9626-116a83513318-kube-api-access-xw7tv\") pod \"placement-operator-controller-manager-5d646b7d76-v45zc\" (UID: \"dd47b70e-0c66-4e11-9626-116a83513318\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.470227 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85446lnr\" (UID: \"e0bcc5d5-4473-4c8b-864a-ff88a0f75595\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.470272 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8hrl\" (UniqueName: \"kubernetes.io/projected/50ad8e85-d883-48ae-a3ea-fefd136db8ca-kube-api-access-p8hrl\") pod \"telemetry-operator-controller-manager-85cd9769bb-pgjk5\" (UID: \"50ad8e85-d883-48ae-a3ea-fefd136db8ca\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.470296 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svmq9\" (UniqueName: \"kubernetes.io/projected/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-kube-api-access-svmq9\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85446lnr\" (UID: \"e0bcc5d5-4473-4c8b-864a-ff88a0f75595\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:32:39 crc kubenswrapper[4814]: E0122 05:32:39.470566 4814 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 05:32:39 crc kubenswrapper[4814]: E0122 05:32:39.470607 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert podName:2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a nodeName:}" failed. 
No retries permitted until 2026-01-22 05:32:40.470592866 +0000 UTC m=+846.554081081 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert") pod "infra-operator-controller-manager-54ccf4f85d-27czn" (UID: "2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a") : secret "infra-operator-webhook-server-cert" not found Jan 22 05:32:39 crc kubenswrapper[4814]: E0122 05:32:39.471018 4814 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 05:32:39 crc kubenswrapper[4814]: E0122 05:32:39.471048 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert podName:e0bcc5d5-4473-4c8b-864a-ff88a0f75595 nodeName:}" failed. No retries permitted until 2026-01-22 05:32:39.97103881 +0000 UTC m=+846.054527025 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" (UID: "e0bcc5d5-4473-4c8b-864a-ff88a0f75595") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.473301 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.481905 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.491278 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-zlp6t" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.509673 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.510471 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.516017 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.516278 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-j5c26" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.516394 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.517383 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svmq9\" (UniqueName: \"kubernetes.io/projected/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-kube-api-access-svmq9\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85446lnr\" (UID: \"e0bcc5d5-4473-4c8b-864a-ff88a0f75595\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.528189 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.541407 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.543136 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xw7tv\" (UniqueName: \"kubernetes.io/projected/dd47b70e-0c66-4e11-9626-116a83513318-kube-api-access-xw7tv\") pod \"placement-operator-controller-manager-5d646b7d76-v45zc\" (UID: \"dd47b70e-0c66-4e11-9626-116a83513318\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.573816 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.574711 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.575353 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p87f2\" (UniqueName: \"kubernetes.io/projected/c0cd1e4f-a723-4eba-a656-fa6e261b17cb-kube-api-access-p87f2\") pod \"watcher-operator-controller-manager-5ffb9c6597-glzvm\" (UID: \"c0cd1e4f-a723-4eba-a656-fa6e261b17cb\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.575436 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8hrl\" (UniqueName: \"kubernetes.io/projected/50ad8e85-d883-48ae-a3ea-fefd136db8ca-kube-api-access-p8hrl\") pod \"telemetry-operator-controller-manager-85cd9769bb-pgjk5\" (UID: \"50ad8e85-d883-48ae-a3ea-fefd136db8ca\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.575491 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7g4b\" (UniqueName: \"kubernetes.io/projected/90b0a17b-e8e5-4ca4-9321-9a1980e27d70-kube-api-access-r7g4b\") pod \"swift-operator-controller-manager-547cbdb99f-czd57\" (UID: \"90b0a17b-e8e5-4ca4-9321-9a1980e27d70\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.575527 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rv7s\" (UniqueName: \"kubernetes.io/projected/77b0a214-c858-4157-924a-a9424d890683-kube-api-access-2rv7s\") pod \"test-operator-controller-manager-69797bbcbd-hprbt\" (UID: \"77b0a214-c858-4157-924a-a9424d890683\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.579904 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-5nrbz" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.585691 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr"] Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.595100 4814 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-p8hrl\" (UniqueName: \"kubernetes.io/projected/50ad8e85-d883-48ae-a3ea-fefd136db8ca-kube-api-access-p8hrl\") pod \"telemetry-operator-controller-manager-85cd9769bb-pgjk5\" (UID: \"50ad8e85-d883-48ae-a3ea-fefd136db8ca\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.609342 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7g4b\" (UniqueName: \"kubernetes.io/projected/90b0a17b-e8e5-4ca4-9321-9a1980e27d70-kube-api-access-r7g4b\") pod \"swift-operator-controller-manager-547cbdb99f-czd57\" (UID: \"90b0a17b-e8e5-4ca4-9321-9a1980e27d70\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.664910 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.682242 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.682302 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.682326 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsjz4\" (UniqueName: \"kubernetes.io/projected/1327ad33-b223-4c96-9b96-a20816f50f4d-kube-api-access-vsjz4\") pod \"rabbitmq-cluster-operator-manager-668c99d594-lvlrr\" (UID: \"1327ad33-b223-4c96-9b96-a20816f50f4d\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.682342 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4dlc\" (UniqueName: \"kubernetes.io/projected/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-kube-api-access-l4dlc\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.682394 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rv7s\" (UniqueName: \"kubernetes.io/projected/77b0a214-c858-4157-924a-a9424d890683-kube-api-access-2rv7s\") pod \"test-operator-controller-manager-69797bbcbd-hprbt\" (UID: \"77b0a214-c858-4157-924a-a9424d890683\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.682431 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p87f2\" 
(UniqueName: \"kubernetes.io/projected/c0cd1e4f-a723-4eba-a656-fa6e261b17cb-kube-api-access-p87f2\") pod \"watcher-operator-controller-manager-5ffb9c6597-glzvm\" (UID: \"c0cd1e4f-a723-4eba-a656-fa6e261b17cb\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.683952 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.710449 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rv7s\" (UniqueName: \"kubernetes.io/projected/77b0a214-c858-4157-924a-a9424d890683-kube-api-access-2rv7s\") pod \"test-operator-controller-manager-69797bbcbd-hprbt\" (UID: \"77b0a214-c858-4157-924a-a9424d890683\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.710661 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.728571 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p87f2\" (UniqueName: \"kubernetes.io/projected/c0cd1e4f-a723-4eba-a656-fa6e261b17cb-kube-api-access-p87f2\") pod \"watcher-operator-controller-manager-5ffb9c6597-glzvm\" (UID: \"c0cd1e4f-a723-4eba-a656-fa6e261b17cb\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.741418 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.784607 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.784676 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.784698 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsjz4\" (UniqueName: \"kubernetes.io/projected/1327ad33-b223-4c96-9b96-a20816f50f4d-kube-api-access-vsjz4\") pod \"rabbitmq-cluster-operator-manager-668c99d594-lvlrr\" (UID: \"1327ad33-b223-4c96-9b96-a20816f50f4d\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.784715 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4dlc\" (UniqueName: \"kubernetes.io/projected/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-kube-api-access-l4dlc\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: 
\"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:39 crc kubenswrapper[4814]: E0122 05:32:39.785104 4814 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 05:32:39 crc kubenswrapper[4814]: E0122 05:32:39.785140 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs podName:f561ac0f-4a79-4e31-a0eb-a3b2c81b4524 nodeName:}" failed. No retries permitted until 2026-01-22 05:32:40.28512791 +0000 UTC m=+846.368616115 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs") pod "openstack-operator-controller-manager-58546c67cc-hknfz" (UID: "f561ac0f-4a79-4e31-a0eb-a3b2c81b4524") : secret "metrics-server-cert" not found Jan 22 05:32:39 crc kubenswrapper[4814]: E0122 05:32:39.786578 4814 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 05:32:39 crc kubenswrapper[4814]: E0122 05:32:39.786609 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs podName:f561ac0f-4a79-4e31-a0eb-a3b2c81b4524 nodeName:}" failed. No retries permitted until 2026-01-22 05:32:40.286601546 +0000 UTC m=+846.370089761 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs") pod "openstack-operator-controller-manager-58546c67cc-hknfz" (UID: "f561ac0f-4a79-4e31-a0eb-a3b2c81b4524") : secret "webhook-server-cert" not found Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.823983 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsjz4\" (UniqueName: \"kubernetes.io/projected/1327ad33-b223-4c96-9b96-a20816f50f4d-kube-api-access-vsjz4\") pod \"rabbitmq-cluster-operator-manager-668c99d594-lvlrr\" (UID: \"1327ad33-b223-4c96-9b96-a20816f50f4d\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.840158 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4dlc\" (UniqueName: \"kubernetes.io/projected/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-kube-api-access-l4dlc\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.876269 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr" Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.974892 4814 util.go:30] "No sandbox for pod can be found. 
Jan 22 05:32:39 crc kubenswrapper[4814]: I0122 05:32:39.974892 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm"
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:39.993880 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85446lnr\" (UID: \"e0bcc5d5-4473-4c8b-864a-ff88a0f75595\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr"
Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:39.994008 4814 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:39.994052 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert podName:e0bcc5d5-4473-4c8b-864a-ff88a0f75595 nodeName:}" failed. No retries permitted until 2026-01-22 05:32:40.994037557 +0000 UTC m=+847.077525772 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" (UID: "e0bcc5d5-4473-4c8b-864a-ff88a0f75595") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.198188 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-mtt48"]
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.219119 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wl699"]
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.267075 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-vrv59"]
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.300351 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz"
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.300417 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz"
Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.300667 4814 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.300717 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs podName:f561ac0f-4a79-4e31-a0eb-a3b2c81b4524 nodeName:}" failed. No retries permitted until 2026-01-22 05:32:41.300703627 +0000 UTC m=+847.384191832 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs") pod "openstack-operator-controller-manager-58546c67cc-hknfz" (UID: "f561ac0f-4a79-4e31-a0eb-a3b2c81b4524") : secret "webhook-server-cert" not found
Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.301022 4814 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.301053 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs podName:f561ac0f-4a79-4e31-a0eb-a3b2c81b4524 nodeName:}" failed. No retries permitted until 2026-01-22 05:32:41.301044838 +0000 UTC m=+847.384533063 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs") pod "openstack-operator-controller-manager-58546c67cc-hknfz" (UID: "f561ac0f-4a79-4e31-a0eb-a3b2c81b4524") : secret "metrics-server-cert" not found
Jan 22 05:32:40 crc kubenswrapper[4814]: W0122 05:32:40.374243 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44e4f657_acdf_4258_abf1_ae2dc3e6efd3.slice/crio-7e8ff2613dc27ccb271f76826f08b10cbeadb7d56489596b007ca359329c910a WatchSource:0}: Error finding container 7e8ff2613dc27ccb271f76826f08b10cbeadb7d56489596b007ca359329c910a: Status 404 returned error can't find the container with id 7e8ff2613dc27ccb271f76826f08b10cbeadb7d56489596b007ca359329c910a
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.398696 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-8wkcr"]
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.430412 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-d65cp"]
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.449711 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mtt48" event={"ID":"7a390991-5dac-4dd5-8afb-996222205b63","Type":"ContainerStarted","Data":"17b0de564d699ff2cfa1f9e09eade2003bc97e48caf84aa87690aaf93e2b50f9"}
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.450865 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-8wkcr" event={"ID":"44e4f657-acdf-4258-abf1-ae2dc3e6efd3","Type":"ContainerStarted","Data":"7e8ff2613dc27ccb271f76826f08b10cbeadb7d56489596b007ca359329c910a"}
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.454036 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-vrv59" event={"ID":"f07d8bf3-82b4-4d63-982b-e8e423ee422b","Type":"ContainerStarted","Data":"03f27ee906829fe208b32d5b65b048fd2a5a9285b15aa6a094e8844231932be3"}
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.454808 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wl699" event={"ID":"03b37176-5f03-44da-b6e5-1d1364483db3","Type":"ContainerStarted","Data":"d494370454398927c1740405eeea549469fcdf0fbccacf43ede8dd7302f3b9f2"}
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.504148 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-27czn\" (UID: \"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn"
Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.504320 4814 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.504405 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert podName:2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a nodeName:}" failed. No retries permitted until 2026-01-22 05:32:42.50438607 +0000 UTC m=+848.587874285 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert") pod "infra-operator-controller-manager-54ccf4f85d-27czn" (UID: "2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a") : secret "infra-operator-webhook-server-cert" not found
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.552595 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5d8f59fb49-lr89d"]
Jan 22 05:32:40 crc kubenswrapper[4814]: W0122 05:32:40.559622 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddeb943fe_dd15_4874_a5cb_0f6ce4b2c291.slice/crio-7cac377a1133aab87b9a71ed334ba6d1943555331007e0fcde4beaec8b4167df WatchSource:0}: Error finding container 7cac377a1133aab87b9a71ed334ba6d1943555331007e0fcde4beaec8b4167df: Status 404 returned error can't find the container with id 7cac377a1133aab87b9a71ed334ba6d1943555331007e0fcde4beaec8b4167df
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.561838 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2"]
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.584651 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv"]
Jan 22 05:32:40 crc kubenswrapper[4814]: W0122 05:32:40.597642 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51023e61_a082_4097_8581_1451d02ef61a.slice/crio-8e652e1f4e00d2f84d19ad23375c7b5187f73f83eeb51745c40e2364aade2e40 WatchSource:0}: Error finding container 8e652e1f4e00d2f84d19ad23375c7b5187f73f83eeb51745c40e2364aade2e40: Status 404 returned error can't find the container with id 8e652e1f4e00d2f84d19ad23375c7b5187f73f83eeb51745c40e2364aade2e40
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.608774 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6"]
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.737360 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-zlp6t"]
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.747823 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj"]
Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.759034 4814 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj"] Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.764282 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc"] Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.776620 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6vwl4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-55db956ddc-pzbz4_openstack-operators(a1db7589-a69f-44a1-8b2c-44ea0c49366c): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.777112 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4"] Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.778444 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4" podUID="a1db7589-a69f-44a1-8b2c-44ea0c49366c" Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.789014 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x"] Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.790471 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xw7tv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5d646b7d76-v45zc_openstack-operators(dd47b70e-0c66-4e11-9626-116a83513318): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.791667 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc" podUID="dd47b70e-0c66-4e11-9626-116a83513318" Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.818084 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:d3c55b59cb192799f8d31196c55c9e9bb3cd38aef7ec51ef257dabf1548e8b30,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-69knj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-69d6c9f5b8-28z6x_openstack-operators(9b9196d5-18b7-4426-9506-ed3278b49437): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.819253 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x" podUID="9b9196d5-18b7-4426-9506-ed3278b49437" Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.856057 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt"] Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.865801 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5"] Jan 22 05:32:40 crc kubenswrapper[4814]: W0122 05:32:40.866151 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod50ad8e85_d883_48ae_a3ea_fefd136db8ca.slice/crio-cda78c39c750165a24e70b78a462ebbf253aef7e07f593d509f789178e7c1c04 WatchSource:0}: Error finding container cda78c39c750165a24e70b78a462ebbf253aef7e07f593d509f789178e7c1c04: Status 404 returned error can't find the container with id cda78c39c750165a24e70b78a462ebbf253aef7e07f593d509f789178e7c1c04 Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.871927 4814 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57"] Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.876994 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2rv7s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-69797bbcbd-hprbt_openstack-operators(77b0a214-c858-4157-924a-a9424d890683): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.878276 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt" podUID="77b0a214-c858-4157-924a-a9424d890683" Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.882720 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-p8hrl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-85cd9769bb-pgjk5_openstack-operators(50ad8e85-d883-48ae-a3ea-fefd136db8ca): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.883896 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5" podUID="50ad8e85-d883-48ae-a3ea-fefd136db8ca" Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.885699 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr"] Jan 22 05:32:40 crc kubenswrapper[4814]: I0122 05:32:40.889619 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm"] Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.891662 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vsjz4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-lvlrr_openstack-operators(1327ad33-b223-4c96-9b96-a20816f50f4d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.892810 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr" podUID="1327ad33-b223-4c96-9b96-a20816f50f4d" Jan 22 05:32:40 crc kubenswrapper[4814]: W0122 05:32:40.893102 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0cd1e4f_a723_4eba_a656_fa6e261b17cb.slice/crio-aa35409b012b298ecbe906b640dee471c9405b5b095a6bd7667ff2f12ccd9ff9 WatchSource:0}: Error finding container aa35409b012b298ecbe906b640dee471c9405b5b095a6bd7667ff2f12ccd9ff9: Status 404 returned error can't find the container with id aa35409b012b298ecbe906b640dee471c9405b5b095a6bd7667ff2f12ccd9ff9 Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.895296 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-p87f2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-5ffb9c6597-glzvm_openstack-operators(c0cd1e4f-a723-4eba-a656-fa6e261b17cb): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 22 05:32:40 crc kubenswrapper[4814]: E0122 05:32:40.896665 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm" podUID="c0cd1e4f-a723-4eba-a656-fa6e261b17cb" Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.011083 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85446lnr\" (UID: \"e0bcc5d5-4473-4c8b-864a-ff88a0f75595\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:32:41 crc kubenswrapper[4814]: E0122 05:32:41.011273 4814 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 05:32:41 crc kubenswrapper[4814]: E0122 05:32:41.014420 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert podName:e0bcc5d5-4473-4c8b-864a-ff88a0f75595 nodeName:}" failed. 
No retries permitted until 2026-01-22 05:32:43.011333919 +0000 UTC m=+849.094822134 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" (UID: "e0bcc5d5-4473-4c8b-864a-ff88a0f75595") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.314481 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz"
Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.314533 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz"
Jan 22 05:32:41 crc kubenswrapper[4814]: E0122 05:32:41.314660 4814 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 22 05:32:41 crc kubenswrapper[4814]: E0122 05:32:41.314694 4814 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 22 05:32:41 crc kubenswrapper[4814]: E0122 05:32:41.314712 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs podName:f561ac0f-4a79-4e31-a0eb-a3b2c81b4524 nodeName:}" failed. No retries permitted until 2026-01-22 05:32:43.314698447 +0000 UTC m=+849.398186662 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs") pod "openstack-operator-controller-manager-58546c67cc-hknfz" (UID: "f561ac0f-4a79-4e31-a0eb-a3b2c81b4524") : secret "webhook-server-cert" not found
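Note the durationBeforeRetry progression across these entries: 500ms, then 1s, then 2s here, and 4s, 8s and 16s further down; nestedpendingoperations doubles the delay for each consecutive failure of the same volume operation. A toy Go sketch of that cadence; the 2m2s ceiling is an assumed upstream default and is never reached in this excerpt:

// Toy reproduction of the retry cadence visible in the log:
// durationBeforeRetry 500ms -> 1s -> 2s -> 4s -> 8s -> 16s ...
// The 2m2s cap is an assumption about the upstream default, not taken
// from this log.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond
	maxDelay := 2*time.Minute + 2*time.Second // assumed cap
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: durationBeforeRetry %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}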
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs") pod "openstack-operator-controller-manager-58546c67cc-hknfz" (UID: "f561ac0f-4a79-4e31-a0eb-a3b2c81b4524") : secret "metrics-server-cert" not found Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.467021 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr" event={"ID":"1327ad33-b223-4c96-9b96-a20816f50f4d","Type":"ContainerStarted","Data":"691f08554806fee1eaf13a1ac3f4a3c01d4c84c9df1232905a6527a52ce4ac62"} Jan 22 05:32:41 crc kubenswrapper[4814]: E0122 05:32:41.469548 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr" podUID="1327ad33-b223-4c96-9b96-a20816f50f4d" Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.470113 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj" event={"ID":"34ce6a30-f048-4c7c-b02a-00a5409379d0","Type":"ContainerStarted","Data":"102dde2dfb4658f865bf8cbc96d13c25022f7edb318c2419655a81025f38890b"} Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.471977 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-lr89d" event={"ID":"deb943fe-dd15-4874-a5cb-0f6ce4b2c291","Type":"ContainerStarted","Data":"7cac377a1133aab87b9a71ed334ba6d1943555331007e0fcde4beaec8b4167df"} Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.477289 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4" event={"ID":"a1db7589-a69f-44a1-8b2c-44ea0c49366c","Type":"ContainerStarted","Data":"9f864d2c9a37b5e195f1648d8ebffef0012862230b3c7cf1be410e8d9f46d1a3"} Jan 22 05:32:41 crc kubenswrapper[4814]: E0122 05:32:41.478777 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4" podUID="a1db7589-a69f-44a1-8b2c-44ea0c49366c" Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.488726 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57" event={"ID":"90b0a17b-e8e5-4ca4-9321-9a1980e27d70","Type":"ContainerStarted","Data":"d6c4b80f2bcfbf9fa37dc07130b6f113ddce977345181d7ed5d1f9854e46c894"} Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.493301 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm" event={"ID":"c0cd1e4f-a723-4eba-a656-fa6e261b17cb","Type":"ContainerStarted","Data":"aa35409b012b298ecbe906b640dee471c9405b5b095a6bd7667ff2f12ccd9ff9"} Jan 22 05:32:41 crc kubenswrapper[4814]: E0122 05:32:41.496019 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm" podUID="c0cd1e4f-a723-4eba-a656-fa6e261b17cb" Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.497292 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6" event={"ID":"668a0682-ff7d-4141-9b7b-1a6fd8f6eb28","Type":"ContainerStarted","Data":"e03c6a3e8828483fd28eb8fd674cdd11c3d8599578bade79b8687a44fda585c6"} Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.509281 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv" event={"ID":"51023e61-a082-4097-8581-1451d02ef61a","Type":"ContainerStarted","Data":"8e652e1f4e00d2f84d19ad23375c7b5187f73f83eeb51745c40e2364aade2e40"} Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.515380 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc" event={"ID":"dd47b70e-0c66-4e11-9626-116a83513318","Type":"ContainerStarted","Data":"caf38369cbd1b102c9f1e7188ae12971ec5e2397dd8e9c535cdefeb26e0dfd18"} Jan 22 05:32:41 crc kubenswrapper[4814]: E0122 05:32:41.520489 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc" podUID="dd47b70e-0c66-4e11-9626-116a83513318" Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.522904 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj" event={"ID":"c71d95dc-90cc-4d59-85b1-5e43c670c034","Type":"ContainerStarted","Data":"eda303029378fce717bb1e0ab8e23a96a51349d8469172ce6c5fb0404601ccc3"} Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.532461 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x" event={"ID":"9b9196d5-18b7-4426-9506-ed3278b49437","Type":"ContainerStarted","Data":"dfde1dc996e558e14965af9a4d9cd4507586bd3a4ff85295e82fcafe7ff65d3a"} Jan 22 05:32:41 crc kubenswrapper[4814]: E0122 05:32:41.538028 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:d3c55b59cb192799f8d31196c55c9e9bb3cd38aef7ec51ef257dabf1548e8b30\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x" podUID="9b9196d5-18b7-4426-9506-ed3278b49437" Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.540059 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-d65cp" event={"ID":"e48491a3-9c69-4ebf-a97f-b6226e2b91ae","Type":"ContainerStarted","Data":"57c5eb0ad172afe8702fe997b5ab346d777a2e35c7c38ad3a08f93b77c44fcd2"} Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.542232 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5" 
event={"ID":"50ad8e85-d883-48ae-a3ea-fefd136db8ca","Type":"ContainerStarted","Data":"cda78c39c750165a24e70b78a462ebbf253aef7e07f593d509f789178e7c1c04"} Jan 22 05:32:41 crc kubenswrapper[4814]: E0122 05:32:41.548992 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5" podUID="50ad8e85-d883-48ae-a3ea-fefd136db8ca" Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.566744 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2" event={"ID":"10ebf9ef-4973-4395-8b4d-d916626341ee","Type":"ContainerStarted","Data":"18d13d0e70ed91a3be4cbd805cb8d73b44a60fccd11a62844f74ba091501d5a0"} Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.580251 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-zlp6t" event={"ID":"4c31a125-9a4c-46fc-99eb-cf5b563d5342","Type":"ContainerStarted","Data":"9c0290b2f1555bdfc8fd721d486064e1855208ccb4873897dae59fd1de0062ba"} Jan 22 05:32:41 crc kubenswrapper[4814]: I0122 05:32:41.585149 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt" event={"ID":"77b0a214-c858-4157-924a-a9424d890683","Type":"ContainerStarted","Data":"6f0712ead8c0e3b2bc1e1ac53dd2d95622e71e55c91a26c5ae44d4cb7eb424bd"} Jan 22 05:32:41 crc kubenswrapper[4814]: E0122 05:32:41.586475 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt" podUID="77b0a214-c858-4157-924a-a9424d890683" Jan 22 05:32:42 crc kubenswrapper[4814]: I0122 05:32:42.537513 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-27czn\" (UID: \"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:32:42 crc kubenswrapper[4814]: E0122 05:32:42.537742 4814 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 05:32:42 crc kubenswrapper[4814]: E0122 05:32:42.537785 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert podName:2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a nodeName:}" failed. No retries permitted until 2026-01-22 05:32:46.537772539 +0000 UTC m=+852.621260754 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert") pod "infra-operator-controller-manager-54ccf4f85d-27czn" (UID: "2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a") : secret "infra-operator-webhook-server-cert" not found Jan 22 05:32:42 crc kubenswrapper[4814]: E0122 05:32:42.598103 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:65cfe5b9d5b0571aaf8ff9840b12cc56e90ca4cef162dd260c3a9fa2b52c6dd0\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc" podUID="dd47b70e-0c66-4e11-9626-116a83513318" Jan 22 05:32:42 crc kubenswrapper[4814]: E0122 05:32:42.598115 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:d3c55b59cb192799f8d31196c55c9e9bb3cd38aef7ec51ef257dabf1548e8b30\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x" podUID="9b9196d5-18b7-4426-9506-ed3278b49437" Jan 22 05:32:42 crc kubenswrapper[4814]: E0122 05:32:42.598368 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm" podUID="c0cd1e4f-a723-4eba-a656-fa6e261b17cb" Jan 22 05:32:42 crc kubenswrapper[4814]: E0122 05:32:42.598461 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr" podUID="1327ad33-b223-4c96-9b96-a20816f50f4d" Jan 22 05:32:42 crc kubenswrapper[4814]: E0122 05:32:42.600555 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5" podUID="50ad8e85-d883-48ae-a3ea-fefd136db8ca" Jan 22 05:32:42 crc kubenswrapper[4814]: E0122 05:32:42.601070 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4" podUID="a1db7589-a69f-44a1-8b2c-44ea0c49366c" Jan 22 05:32:42 crc kubenswrapper[4814]: E0122 05:32:42.601315 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" 
pod="openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt" podUID="77b0a214-c858-4157-924a-a9424d890683" Jan 22 05:32:43 crc kubenswrapper[4814]: I0122 05:32:43.044228 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85446lnr\" (UID: \"e0bcc5d5-4473-4c8b-864a-ff88a0f75595\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:32:43 crc kubenswrapper[4814]: E0122 05:32:43.044462 4814 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 05:32:43 crc kubenswrapper[4814]: E0122 05:32:43.044557 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert podName:e0bcc5d5-4473-4c8b-864a-ff88a0f75595 nodeName:}" failed. No retries permitted until 2026-01-22 05:32:47.044533341 +0000 UTC m=+853.128021556 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" (UID: "e0bcc5d5-4473-4c8b-864a-ff88a0f75595") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 05:32:43 crc kubenswrapper[4814]: I0122 05:32:43.347909 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:43 crc kubenswrapper[4814]: I0122 05:32:43.347962 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:43 crc kubenswrapper[4814]: E0122 05:32:43.348053 4814 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 05:32:43 crc kubenswrapper[4814]: E0122 05:32:43.348079 4814 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 05:32:43 crc kubenswrapper[4814]: E0122 05:32:43.348111 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs podName:f561ac0f-4a79-4e31-a0eb-a3b2c81b4524 nodeName:}" failed. No retries permitted until 2026-01-22 05:32:47.348094656 +0000 UTC m=+853.431582871 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs") pod "openstack-operator-controller-manager-58546c67cc-hknfz" (UID: "f561ac0f-4a79-4e31-a0eb-a3b2c81b4524") : secret "metrics-server-cert" not found Jan 22 05:32:43 crc kubenswrapper[4814]: E0122 05:32:43.348132 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs podName:f561ac0f-4a79-4e31-a0eb-a3b2c81b4524 nodeName:}" failed. No retries permitted until 2026-01-22 05:32:47.348118327 +0000 UTC m=+853.431606542 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs") pod "openstack-operator-controller-manager-58546c67cc-hknfz" (UID: "f561ac0f-4a79-4e31-a0eb-a3b2c81b4524") : secret "webhook-server-cert" not found Jan 22 05:32:46 crc kubenswrapper[4814]: I0122 05:32:46.598217 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-27czn\" (UID: \"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:32:46 crc kubenswrapper[4814]: E0122 05:32:46.598340 4814 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 05:32:46 crc kubenswrapper[4814]: E0122 05:32:46.598578 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert podName:2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a nodeName:}" failed. No retries permitted until 2026-01-22 05:32:54.59856317 +0000 UTC m=+860.682051385 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert") pod "infra-operator-controller-manager-54ccf4f85d-27czn" (UID: "2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a") : secret "infra-operator-webhook-server-cert" not found Jan 22 05:32:47 crc kubenswrapper[4814]: I0122 05:32:47.108286 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85446lnr\" (UID: \"e0bcc5d5-4473-4c8b-864a-ff88a0f75595\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:32:47 crc kubenswrapper[4814]: E0122 05:32:47.108517 4814 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 05:32:47 crc kubenswrapper[4814]: E0122 05:32:47.108610 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert podName:e0bcc5d5-4473-4c8b-864a-ff88a0f75595 nodeName:}" failed. No retries permitted until 2026-01-22 05:32:55.108585393 +0000 UTC m=+861.192073648 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" (UID: "e0bcc5d5-4473-4c8b-864a-ff88a0f75595") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 22 05:32:47 crc kubenswrapper[4814]: I0122 05:32:47.413565 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:47 crc kubenswrapper[4814]: I0122 05:32:47.413741 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:47 crc kubenswrapper[4814]: E0122 05:32:47.413889 4814 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 22 05:32:47 crc kubenswrapper[4814]: E0122 05:32:47.413918 4814 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 05:32:47 crc kubenswrapper[4814]: E0122 05:32:47.414034 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs podName:f561ac0f-4a79-4e31-a0eb-a3b2c81b4524 nodeName:}" failed. No retries permitted until 2026-01-22 05:32:55.413998705 +0000 UTC m=+861.497486950 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs") pod "openstack-operator-controller-manager-58546c67cc-hknfz" (UID: "f561ac0f-4a79-4e31-a0eb-a3b2c81b4524") : secret "metrics-server-cert" not found Jan 22 05:32:47 crc kubenswrapper[4814]: E0122 05:32:47.414097 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs podName:f561ac0f-4a79-4e31-a0eb-a3b2c81b4524 nodeName:}" failed. No retries permitted until 2026-01-22 05:32:55.414074177 +0000 UTC m=+861.497562422 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs") pod "openstack-operator-controller-manager-58546c67cc-hknfz" (UID: "f561ac0f-4a79-4e31-a0eb-a3b2c81b4524") : secret "webhook-server-cert" not found Jan 22 05:32:54 crc kubenswrapper[4814]: I0122 05:32:54.643894 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-27czn\" (UID: \"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:32:54 crc kubenswrapper[4814]: E0122 05:32:54.644722 4814 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 22 05:32:54 crc kubenswrapper[4814]: E0122 05:32:54.644771 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert podName:2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a nodeName:}" failed. No retries permitted until 2026-01-22 05:33:10.64475782 +0000 UTC m=+876.728246035 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert") pod "infra-operator-controller-manager-54ccf4f85d-27czn" (UID: "2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a") : secret "infra-operator-webhook-server-cert" not found Jan 22 05:32:55 crc kubenswrapper[4814]: I0122 05:32:55.150999 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85446lnr\" (UID: \"e0bcc5d5-4473-4c8b-864a-ff88a0f75595\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:32:55 crc kubenswrapper[4814]: I0122 05:32:55.157816 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e0bcc5d5-4473-4c8b-864a-ff88a0f75595-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b85446lnr\" (UID: \"e0bcc5d5-4473-4c8b-864a-ff88a0f75595\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:32:55 crc kubenswrapper[4814]: I0122 05:32:55.201459 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:32:55 crc kubenswrapper[4814]: I0122 05:32:55.454744 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:55 crc kubenswrapper[4814]: I0122 05:32:55.455136 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:55 crc kubenswrapper[4814]: E0122 05:32:55.455313 4814 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 22 05:32:55 crc kubenswrapper[4814]: E0122 05:32:55.455393 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs podName:f561ac0f-4a79-4e31-a0eb-a3b2c81b4524 nodeName:}" failed. No retries permitted until 2026-01-22 05:33:11.455375956 +0000 UTC m=+877.538864171 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs") pod "openstack-operator-controller-manager-58546c67cc-hknfz" (UID: "f561ac0f-4a79-4e31-a0eb-a3b2c81b4524") : secret "webhook-server-cert" not found Jan 22 05:32:55 crc kubenswrapper[4814]: I0122 05:32:55.459217 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-metrics-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:32:57 crc kubenswrapper[4814]: E0122 05:32:57.619592 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8" Jan 22 05:32:57 crc kubenswrapper[4814]: E0122 05:32:57.620236 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2hgdq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-78c6999f6f-hc6fj_openstack-operators(34ce6a30-f048-4c7c-b02a-00a5409379d0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:32:57 crc kubenswrapper[4814]: E0122 05:32:57.621495 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj" podUID="34ce6a30-f048-4c7c-b02a-00a5409379d0" Jan 22 05:32:57 crc kubenswrapper[4814]: E0122 05:32:57.708551 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:8bee4480babd6fd8f686e0ba52a304acb6ffb90f09c7c57e7f5df5f7658836d8\\\"\"" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj" podUID="34ce6a30-f048-4c7c-b02a-00a5409379d0" Jan 22 05:32:59 crc kubenswrapper[4814]: E0122 05:32:59.161062 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922" Jan 22 05:32:59 crc kubenswrapper[4814]: E0122 05:32:59.161257 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-r7g4b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-547cbdb99f-czd57_openstack-operators(90b0a17b-e8e5-4ca4-9321-9a1980e27d70): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:32:59 crc kubenswrapper[4814]: E0122 05:32:59.162470 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57" podUID="90b0a17b-e8e5-4ca4-9321-9a1980e27d70" Jan 22 05:32:59 crc kubenswrapper[4814]: E0122 05:32:59.736009 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922\\\"\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57" podUID="90b0a17b-e8e5-4ca4-9321-9a1980e27d70" Jan 22 05:33:02 crc kubenswrapper[4814]: E0122 05:33:02.359764 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492" Jan 22 05:33:02 crc kubenswrapper[4814]: E0122 05:33:02.360143 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-htpj6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-594c8c9d5d-nmspv_openstack-operators(51023e61-a082-4097-8581-1451d02ef61a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:33:02 crc kubenswrapper[4814]: E0122 05:33:02.361682 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv" podUID="51023e61-a082-4097-8581-1451d02ef61a" Jan 22 05:33:02 crc kubenswrapper[4814]: E0122 05:33:02.752424 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492\\\"\"" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv" podUID="51023e61-a082-4097-8581-1451d02ef61a" Jan 22 05:33:04 crc kubenswrapper[4814]: E0122 05:33:04.490939 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/openstack-k8s-operators/octavia-operator@sha256:a8fc8f9d445b1232f446119015b226008b07c6a259f5bebc1fcbb39ec310afe5" Jan 22 05:33:04 crc kubenswrapper[4814]: E0122 05:33:04.492264 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:a8fc8f9d445b1232f446119015b226008b07c6a259f5bebc1fcbb39ec310afe5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-j766v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-7bd9774b6-qr7k2_openstack-operators(10ebf9ef-4973-4395-8b4d-d916626341ee): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:33:04 crc kubenswrapper[4814]: E0122 05:33:04.493491 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2" podUID="10ebf9ef-4973-4395-8b4d-d916626341ee" Jan 22 05:33:04 crc kubenswrapper[4814]: E0122 05:33:04.774189 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:a8fc8f9d445b1232f446119015b226008b07c6a259f5bebc1fcbb39ec310afe5\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2" 
podUID="10ebf9ef-4973-4395-8b4d-d916626341ee" Jan 22 05:33:07 crc kubenswrapper[4814]: E0122 05:33:07.230980 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349" Jan 22 05:33:07 crc kubenswrapper[4814]: E0122 05:33:07.231148 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nnw4g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-b8b6d4659-4csc6_openstack-operators(668a0682-ff7d-4141-9b7b-1a6fd8f6eb28): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:33:07 crc kubenswrapper[4814]: E0122 05:33:07.232336 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6" podUID="668a0682-ff7d-4141-9b7b-1a6fd8f6eb28" Jan 22 05:33:07 crc kubenswrapper[4814]: E0122 05:33:07.792269 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6" podUID="668a0682-ff7d-4141-9b7b-1a6fd8f6eb28" Jan 22 05:33:10 crc kubenswrapper[4814]: I0122 05:33:10.706695 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-27czn\" (UID: \"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:33:10 crc kubenswrapper[4814]: I0122 05:33:10.721604 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-27czn\" (UID: \"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:33:10 crc kubenswrapper[4814]: I0122 05:33:10.833500 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:33:11 crc kubenswrapper[4814]: I0122 05:33:11.517328 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:33:11 crc kubenswrapper[4814]: I0122 05:33:11.521893 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f561ac0f-4a79-4e31-a0eb-a3b2c81b4524-webhook-certs\") pod \"openstack-operator-controller-manager-58546c67cc-hknfz\" (UID: \"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524\") " pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:33:11 crc kubenswrapper[4814]: I0122 05:33:11.654209 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:33:14 crc kubenswrapper[4814]: E0122 05:33:14.766072 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831" Jan 22 05:33:14 crc kubenswrapper[4814]: E0122 05:33:14.766555 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nmj47,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-6b8bc8d87d-ht9kj_openstack-operators(c71d95dc-90cc-4d59-85b1-5e43c670c034): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:33:14 crc kubenswrapper[4814]: E0122 05:33:14.767669 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj" podUID="c71d95dc-90cc-4d59-85b1-5e43c670c034" Jan 22 05:33:14 crc kubenswrapper[4814]: E0122 05:33:14.859522 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831\\\"\"" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj" podUID="c71d95dc-90cc-4d59-85b1-5e43c670c034" Jan 22 05:33:15 crc kubenswrapper[4814]: E0122 05:33:15.364104 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Jan 22 05:33:15 crc kubenswrapper[4814]: E0122 05:33:15.364270 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vsjz4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-lvlrr_openstack-operators(1327ad33-b223-4c96-9b96-a20816f50f4d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:33:15 crc kubenswrapper[4814]: E0122 05:33:15.365509 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr" podUID="1327ad33-b223-4c96-9b96-a20816f50f4d" Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.826785 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-h7fd2"] Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.833061 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.838486 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-h7fd2"] Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.911278 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz"] Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.912543 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-vrv59" event={"ID":"f07d8bf3-82b4-4d63-982b-e8e423ee422b","Type":"ContainerStarted","Data":"43595400e543f0ce258a374a81a1366969c2f2a68b5bd53e54e688fda95aaae1"} Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.912644 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-vrv59" Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.937804 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wl699" event={"ID":"03b37176-5f03-44da-b6e5-1d1364483db3","Type":"ContainerStarted","Data":"452d0f7771e72ef64add6c3642b08def540736fc410a796fe38086f2a38e3943"} Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.938577 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wl699" Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.971312 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-lr89d" event={"ID":"deb943fe-dd15-4874-a5cb-0f6ce4b2c291","Type":"ContainerStarted","Data":"9a0cb08d64347203ac1ba6dd83ae28edd0c6dffcad56691e7a6ec07266625512"} Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.971992 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-lr89d" Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.973414 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-vrv59" podStartSLOduration=9.250522089 podStartE2EDuration="37.973395544s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.311616546 +0000 UTC m=+846.395104761" lastFinishedPulling="2026-01-22 05:33:09.034490001 +0000 UTC m=+875.117978216" observedRunningTime="2026-01-22 05:33:15.935960272 +0000 UTC m=+882.019448487" watchObservedRunningTime="2026-01-22 05:33:15.973395544 +0000 UTC m=+882.056883759" Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.976334 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wl699" podStartSLOduration=7.636183631 podStartE2EDuration="37.976328385s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.279675594 +0000 UTC m=+846.363163809" lastFinishedPulling="2026-01-22 05:33:10.619820308 +0000 UTC m=+876.703308563" observedRunningTime="2026-01-22 05:33:15.972174876 +0000 UTC m=+882.055663091" watchObservedRunningTime="2026-01-22 05:33:15.976328385 +0000 UTC m=+882.059816600" Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.989186 4814 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr"] Jan 22 05:33:15 crc kubenswrapper[4814]: I0122 05:33:15.996895 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-lr89d" podStartSLOduration=9.52847451 podStartE2EDuration="37.996876924s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.566884731 +0000 UTC m=+846.650372956" lastFinishedPulling="2026-01-22 05:33:09.035287135 +0000 UTC m=+875.118775370" observedRunningTime="2026-01-22 05:33:15.992571139 +0000 UTC m=+882.076059354" watchObservedRunningTime="2026-01-22 05:33:15.996876924 +0000 UTC m=+882.080365139" Jan 22 05:33:16 crc kubenswrapper[4814]: W0122 05:33:16.023747 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf561ac0f_4a79_4e31_a0eb_a3b2c81b4524.slice/crio-f4edbe8b69e3c59d220b6a9aa363f6816541259a5156e1628158b5dfabc06aa0 WatchSource:0}: Error finding container f4edbe8b69e3c59d220b6a9aa363f6816541259a5156e1628158b5dfabc06aa0: Status 404 returned error can't find the container with id f4edbe8b69e3c59d220b6a9aa363f6816541259a5156e1628158b5dfabc06aa0 Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.030742 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhvqf\" (UniqueName: \"kubernetes.io/projected/8455e598-45fe-4009-b653-fc98f17baf96-kube-api-access-nhvqf\") pod \"certified-operators-h7fd2\" (UID: \"8455e598-45fe-4009-b653-fc98f17baf96\") " pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.030833 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8455e598-45fe-4009-b653-fc98f17baf96-catalog-content\") pod \"certified-operators-h7fd2\" (UID: \"8455e598-45fe-4009-b653-fc98f17baf96\") " pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.030931 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8455e598-45fe-4009-b653-fc98f17baf96-utilities\") pod \"certified-operators-h7fd2\" (UID: \"8455e598-45fe-4009-b653-fc98f17baf96\") " pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:16 crc kubenswrapper[4814]: W0122 05:33:16.092304 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode0bcc5d5_4473_4c8b_864a_ff88a0f75595.slice/crio-3a20693795a9acf71aee80cd8ba8d2de5f3f0c2cd64dedada1edbcedb44814cc WatchSource:0}: Error finding container 3a20693795a9acf71aee80cd8ba8d2de5f3f0c2cd64dedada1edbcedb44814cc: Status 404 returned error can't find the container with id 3a20693795a9acf71aee80cd8ba8d2de5f3f0c2cd64dedada1edbcedb44814cc Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.135667 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8455e598-45fe-4009-b653-fc98f17baf96-catalog-content\") pod \"certified-operators-h7fd2\" (UID: \"8455e598-45fe-4009-b653-fc98f17baf96\") " pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.135737 4814 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8455e598-45fe-4009-b653-fc98f17baf96-utilities\") pod \"certified-operators-h7fd2\" (UID: \"8455e598-45fe-4009-b653-fc98f17baf96\") " pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.135798 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhvqf\" (UniqueName: \"kubernetes.io/projected/8455e598-45fe-4009-b653-fc98f17baf96-kube-api-access-nhvqf\") pod \"certified-operators-h7fd2\" (UID: \"8455e598-45fe-4009-b653-fc98f17baf96\") " pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.136455 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8455e598-45fe-4009-b653-fc98f17baf96-catalog-content\") pod \"certified-operators-h7fd2\" (UID: \"8455e598-45fe-4009-b653-fc98f17baf96\") " pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.136702 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8455e598-45fe-4009-b653-fc98f17baf96-utilities\") pod \"certified-operators-h7fd2\" (UID: \"8455e598-45fe-4009-b653-fc98f17baf96\") " pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.166117 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhvqf\" (UniqueName: \"kubernetes.io/projected/8455e598-45fe-4009-b653-fc98f17baf96-kube-api-access-nhvqf\") pod \"certified-operators-h7fd2\" (UID: \"8455e598-45fe-4009-b653-fc98f17baf96\") " pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.187942 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.261285 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn"] Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.978167 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-zlp6t" event={"ID":"4c31a125-9a4c-46fc-99eb-cf5b563d5342","Type":"ContainerStarted","Data":"0f7a6e81a101e7722ecdb86f968df5db7a3856d377bca8c8ddc78bcaf8a34809"} Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.978941 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-zlp6t" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.979181 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" event={"ID":"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a","Type":"ContainerStarted","Data":"7b1be18bd14399e4b6a197fe76e9f286606f774d3065eb715b0bbe3b4214ea9e"} Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.980365 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-8wkcr" event={"ID":"44e4f657-acdf-4258-abf1-ae2dc3e6efd3","Type":"ContainerStarted","Data":"2f452297ebec39000a0209a817278b1a64bb48b84187f7a6a505920100fb8c45"} Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.980501 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-8wkcr" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.981452 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4" event={"ID":"a1db7589-a69f-44a1-8b2c-44ea0c49366c","Type":"ContainerStarted","Data":"e1a738ad57f1ad6fa557bbe06b8076228da04057af71d6a550b8b5fe1b10fcad"} Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.981584 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.982684 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-d65cp" event={"ID":"e48491a3-9c69-4ebf-a97f-b6226e2b91ae","Type":"ContainerStarted","Data":"f0903b31fb9e24753f73803a11d473701df6f94b61a4ad003b099225e1184683"} Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.982801 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-d65cp" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.983976 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mtt48" event={"ID":"7a390991-5dac-4dd5-8afb-996222205b63","Type":"ContainerStarted","Data":"cd5e26aad8e6ace7d5427566e99fc04c3c66ea2d7315648f753772414d7ce3bb"} Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.984159 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mtt48" Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.985164 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" event={"ID":"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524","Type":"ContainerStarted","Data":"f4edbe8b69e3c59d220b6a9aa363f6816541259a5156e1628158b5dfabc06aa0"} Jan 22 05:33:16 crc kubenswrapper[4814]: I0122 05:33:16.987348 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" event={"ID":"e0bcc5d5-4473-4c8b-864a-ff88a0f75595","Type":"ContainerStarted","Data":"3a20693795a9acf71aee80cd8ba8d2de5f3f0c2cd64dedada1edbcedb44814cc"} Jan 22 05:33:17 crc kubenswrapper[4814]: I0122 05:33:17.010181 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-zlp6t" podStartSLOduration=9.134393175 podStartE2EDuration="39.010165342s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.743660699 +0000 UTC m=+846.827148914" lastFinishedPulling="2026-01-22 05:33:10.619432826 +0000 UTC m=+876.702921081" observedRunningTime="2026-01-22 05:33:17.002947697 +0000 UTC m=+883.086435912" watchObservedRunningTime="2026-01-22 05:33:17.010165342 +0000 UTC m=+883.093653557" Jan 22 05:33:17 crc kubenswrapper[4814]: I0122 05:33:17.049855 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-h7fd2"] Jan 22 05:33:17 crc kubenswrapper[4814]: I0122 05:33:17.053194 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4" podStartSLOduration=4.347557052 podStartE2EDuration="39.053172836s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.776441256 +0000 UTC m=+846.859929471" lastFinishedPulling="2026-01-22 05:33:15.48205704 +0000 UTC m=+881.565545255" observedRunningTime="2026-01-22 05:33:17.049738201 +0000 UTC m=+883.133226406" watchObservedRunningTime="2026-01-22 05:33:17.053172836 +0000 UTC m=+883.136661051" Jan 22 05:33:17 crc kubenswrapper[4814]: I0122 05:33:17.072847 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-d65cp" podStartSLOduration=10.479496765 podStartE2EDuration="39.072832547s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.442417297 +0000 UTC m=+846.525905512" lastFinishedPulling="2026-01-22 05:33:09.035753079 +0000 UTC m=+875.119241294" observedRunningTime="2026-01-22 05:33:17.070972639 +0000 UTC m=+883.154460844" watchObservedRunningTime="2026-01-22 05:33:17.072832547 +0000 UTC m=+883.156320762" Jan 22 05:33:17 crc kubenswrapper[4814]: I0122 05:33:17.100001 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-8wkcr" podStartSLOduration=8.867589721 podStartE2EDuration="39.09998458s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.385491169 +0000 UTC m=+846.468979384" lastFinishedPulling="2026-01-22 05:33:10.617885998 +0000 UTC m=+876.701374243" observedRunningTime="2026-01-22 05:33:17.096023647 +0000 UTC m=+883.179511862" watchObservedRunningTime="2026-01-22 05:33:17.09998458 +0000 UTC m=+883.183472795" Jan 22 05:33:17 crc kubenswrapper[4814]: I0122 05:33:17.128730 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mtt48" podStartSLOduration=10.35243003 podStartE2EDuration="39.128715862s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.259393445 +0000 UTC m=+846.342881660" lastFinishedPulling="2026-01-22 05:33:09.035679277 +0000 UTC m=+875.119167492" observedRunningTime="2026-01-22 05:33:17.124341846 +0000 UTC m=+883.207830061" watchObservedRunningTime="2026-01-22 05:33:17.128715862 +0000 UTC m=+883.212204077" Jan 22 05:33:17 crc kubenswrapper[4814]: I0122 05:33:17.994521 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h7fd2" event={"ID":"8455e598-45fe-4009-b653-fc98f17baf96","Type":"ContainerStarted","Data":"f4fc235afd254bec61fcedfd7da1a4fc87f52e90c36aeed582b34e1fe0c0ce8a"} Jan 22 05:33:19 crc kubenswrapper[4814]: I0122 05:33:19.690451 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9zrv4"] Jan 22 05:33:19 crc kubenswrapper[4814]: I0122 05:33:19.692214 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:19 crc kubenswrapper[4814]: I0122 05:33:19.715458 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9zrv4"] Jan 22 05:33:19 crc kubenswrapper[4814]: I0122 05:33:19.817607 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7367f220-f4f6-471b-9ae0-6aa5adda56be-utilities\") pod \"community-operators-9zrv4\" (UID: \"7367f220-f4f6-471b-9ae0-6aa5adda56be\") " pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:19 crc kubenswrapper[4814]: I0122 05:33:19.817710 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56x7w\" (UniqueName: \"kubernetes.io/projected/7367f220-f4f6-471b-9ae0-6aa5adda56be-kube-api-access-56x7w\") pod \"community-operators-9zrv4\" (UID: \"7367f220-f4f6-471b-9ae0-6aa5adda56be\") " pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:19 crc kubenswrapper[4814]: I0122 05:33:19.817734 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7367f220-f4f6-471b-9ae0-6aa5adda56be-catalog-content\") pod \"community-operators-9zrv4\" (UID: \"7367f220-f4f6-471b-9ae0-6aa5adda56be\") " pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:19 crc kubenswrapper[4814]: I0122 05:33:19.918777 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7367f220-f4f6-471b-9ae0-6aa5adda56be-utilities\") pod \"community-operators-9zrv4\" (UID: \"7367f220-f4f6-471b-9ae0-6aa5adda56be\") " pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:19 crc kubenswrapper[4814]: I0122 05:33:19.918866 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56x7w\" (UniqueName: \"kubernetes.io/projected/7367f220-f4f6-471b-9ae0-6aa5adda56be-kube-api-access-56x7w\") pod \"community-operators-9zrv4\" (UID: \"7367f220-f4f6-471b-9ae0-6aa5adda56be\") " pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:19 crc kubenswrapper[4814]: I0122 05:33:19.918891 4814 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7367f220-f4f6-471b-9ae0-6aa5adda56be-catalog-content\") pod \"community-operators-9zrv4\" (UID: \"7367f220-f4f6-471b-9ae0-6aa5adda56be\") " pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:19 crc kubenswrapper[4814]: I0122 05:33:19.919699 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7367f220-f4f6-471b-9ae0-6aa5adda56be-utilities\") pod \"community-operators-9zrv4\" (UID: \"7367f220-f4f6-471b-9ae0-6aa5adda56be\") " pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:19 crc kubenswrapper[4814]: I0122 05:33:19.920211 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7367f220-f4f6-471b-9ae0-6aa5adda56be-catalog-content\") pod \"community-operators-9zrv4\" (UID: \"7367f220-f4f6-471b-9ae0-6aa5adda56be\") " pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:19 crc kubenswrapper[4814]: I0122 05:33:19.937543 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56x7w\" (UniqueName: \"kubernetes.io/projected/7367f220-f4f6-471b-9ae0-6aa5adda56be-kube-api-access-56x7w\") pod \"community-operators-9zrv4\" (UID: \"7367f220-f4f6-471b-9ae0-6aa5adda56be\") " pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:20 crc kubenswrapper[4814]: I0122 05:33:20.022660 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:20 crc kubenswrapper[4814]: I0122 05:33:20.026533 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj" event={"ID":"34ce6a30-f048-4c7c-b02a-00a5409379d0","Type":"ContainerStarted","Data":"fd345f8c601126b9b59a60b34f4a58a00b0300c8e0ff2c2395b64b61d814b7e0"} Jan 22 05:33:20 crc kubenswrapper[4814]: I0122 05:33:20.027797 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm" event={"ID":"c0cd1e4f-a723-4eba-a656-fa6e261b17cb","Type":"ContainerStarted","Data":"7a50f3693a7477f2fac996a6b8fe19cd61b1b5c7dc00155cf274acf40278ecc0"} Jan 22 05:33:20 crc kubenswrapper[4814]: I0122 05:33:20.028877 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5" event={"ID":"50ad8e85-d883-48ae-a3ea-fefd136db8ca","Type":"ContainerStarted","Data":"18f4c16a1f226eb8f1f82bc6c36be5f7c67bec154c364258ea44fbda82e38456"} Jan 22 05:33:20 crc kubenswrapper[4814]: I0122 05:33:20.030021 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57" event={"ID":"90b0a17b-e8e5-4ca4-9321-9a1980e27d70","Type":"ContainerStarted","Data":"406105f9bc088761b8063ac43006f4f54bfa95a5d516254b48d248f4ccb58e1e"} Jan 22 05:33:20 crc kubenswrapper[4814]: I0122 05:33:20.031249 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt" event={"ID":"77b0a214-c858-4157-924a-a9424d890683","Type":"ContainerStarted","Data":"b2c9e3ec1acb178fb92df93dbf5d9a78fdbf02972eaca898db30f48d9fafcfb2"} Jan 22 05:33:20 crc kubenswrapper[4814]: I0122 05:33:20.033568 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" event={"ID":"f561ac0f-4a79-4e31-a0eb-a3b2c81b4524","Type":"ContainerStarted","Data":"ed3ba1bfdcf34b911955dcd166ac362d3bfd65261be729a2b96d1cb0e22a5710"} Jan 22 05:33:20 crc kubenswrapper[4814]: I0122 05:33:20.035039 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv" event={"ID":"51023e61-a082-4097-8581-1451d02ef61a","Type":"ContainerStarted","Data":"d4528a6385038319ad9920144d087dac8c492ddc7dd19163311d69e5729ee26a"} Jan 22 05:33:20 crc kubenswrapper[4814]: I0122 05:33:20.036375 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc" event={"ID":"dd47b70e-0c66-4e11-9626-116a83513318","Type":"ContainerStarted","Data":"fe00d2cf123d115a2f624360fd56316473ef16e212a2125b7652c03f75d5792d"} Jan 22 05:33:20 crc kubenswrapper[4814]: I0122 05:33:20.037568 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x" event={"ID":"9b9196d5-18b7-4426-9506-ed3278b49437","Type":"ContainerStarted","Data":"a854f9b71e3722bff6f9dae95a3ae0502ed73c746fd427d4628c2f052c954a94"} Jan 22 05:33:20 crc kubenswrapper[4814]: I0122 05:33:20.502434 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9zrv4"] Jan 22 05:33:20 crc kubenswrapper[4814]: W0122 05:33:20.524750 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7367f220_f4f6_471b_9ae0_6aa5adda56be.slice/crio-13a297deef451747ba4babbb25f5c4312f79e5fd9faedf0a5c120d795959eec2 WatchSource:0}: Error finding container 13a297deef451747ba4babbb25f5c4312f79e5fd9faedf0a5c120d795959eec2: Status 404 returned error can't find the container with id 13a297deef451747ba4babbb25f5c4312f79e5fd9faedf0a5c120d795959eec2 Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.048063 4814 generic.go:334] "Generic (PLEG): container finished" podID="7367f220-f4f6-471b-9ae0-6aa5adda56be" containerID="03286b778fed3b1da11c72018eb63e1bb9d88c021978adbeffdd8959422e61a2" exitCode=0 Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.048123 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9zrv4" event={"ID":"7367f220-f4f6-471b-9ae0-6aa5adda56be","Type":"ContainerDied","Data":"03286b778fed3b1da11c72018eb63e1bb9d88c021978adbeffdd8959422e61a2"} Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.048147 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9zrv4" event={"ID":"7367f220-f4f6-471b-9ae0-6aa5adda56be","Type":"ContainerStarted","Data":"13a297deef451747ba4babbb25f5c4312f79e5fd9faedf0a5c120d795959eec2"} Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.049418 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.053870 4814 generic.go:334] "Generic (PLEG): container finished" podID="8455e598-45fe-4009-b653-fc98f17baf96" containerID="ceff3bea245ccbdfae5b3add004d74747d31ff5677a6910e7768037baaf7208f" exitCode=0 Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.054008 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h7fd2" 
event={"ID":"8455e598-45fe-4009-b653-fc98f17baf96","Type":"ContainerDied","Data":"ceff3bea245ccbdfae5b3add004d74747d31ff5677a6910e7768037baaf7208f"} Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.054280 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj" Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.115043 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57" podStartSLOduration=8.340189627 podStartE2EDuration="43.1150245s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.876757301 +0000 UTC m=+846.960245516" lastFinishedPulling="2026-01-22 05:33:15.651592174 +0000 UTC m=+881.735080389" observedRunningTime="2026-01-22 05:33:21.111391728 +0000 UTC m=+887.194879933" watchObservedRunningTime="2026-01-22 05:33:21.1150245 +0000 UTC m=+887.198512715" Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.161770 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x" podStartSLOduration=8.489791851 podStartE2EDuration="43.161756411s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.817925714 +0000 UTC m=+846.901413929" lastFinishedPulling="2026-01-22 05:33:15.489890274 +0000 UTC m=+881.573378489" observedRunningTime="2026-01-22 05:33:21.13565133 +0000 UTC m=+887.219139545" watchObservedRunningTime="2026-01-22 05:33:21.161756411 +0000 UTC m=+887.245244626" Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.163688 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj" podStartSLOduration=8.375090552 podStartE2EDuration="43.163683281s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.738331704 +0000 UTC m=+846.821819919" lastFinishedPulling="2026-01-22 05:33:15.526924433 +0000 UTC m=+881.610412648" observedRunningTime="2026-01-22 05:33:21.16239171 +0000 UTC m=+887.245879925" watchObservedRunningTime="2026-01-22 05:33:21.163683281 +0000 UTC m=+887.247171496" Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.189496 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5" podStartSLOduration=7.589326203 podStartE2EDuration="42.189480642s" podCreationTimestamp="2026-01-22 05:32:39 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.882565341 +0000 UTC m=+846.966053556" lastFinishedPulling="2026-01-22 05:33:15.48271978 +0000 UTC m=+881.566207995" observedRunningTime="2026-01-22 05:33:21.183897678 +0000 UTC m=+887.267385883" watchObservedRunningTime="2026-01-22 05:33:21.189480642 +0000 UTC m=+887.272968857" Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.211518 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm" podStartSLOduration=7.7358 podStartE2EDuration="42.211500565s" podCreationTimestamp="2026-01-22 05:32:39 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.895193684 +0000 UTC m=+846.978681899" lastFinishedPulling="2026-01-22 05:33:15.370894249 +0000 UTC m=+881.454382464" observedRunningTime="2026-01-22 05:33:21.210531665 +0000 UTC m=+887.294019880" 
watchObservedRunningTime="2026-01-22 05:33:21.211500565 +0000 UTC m=+887.294988770" Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.230018 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc" podStartSLOduration=8.516907344 podStartE2EDuration="43.229993839s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.790042879 +0000 UTC m=+846.873531104" lastFinishedPulling="2026-01-22 05:33:15.503129384 +0000 UTC m=+881.586617599" observedRunningTime="2026-01-22 05:33:21.229437853 +0000 UTC m=+887.312926068" watchObservedRunningTime="2026-01-22 05:33:21.229993839 +0000 UTC m=+887.313482054" Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.250162 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt" podStartSLOduration=7.644190555 podStartE2EDuration="42.250148805s" podCreationTimestamp="2026-01-22 05:32:39 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.876841653 +0000 UTC m=+846.960329858" lastFinishedPulling="2026-01-22 05:33:15.482799893 +0000 UTC m=+881.566288108" observedRunningTime="2026-01-22 05:33:21.246815821 +0000 UTC m=+887.330304036" watchObservedRunningTime="2026-01-22 05:33:21.250148805 +0000 UTC m=+887.333637020" Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.285176 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" podStartSLOduration=42.285154802 podStartE2EDuration="42.285154802s" podCreationTimestamp="2026-01-22 05:32:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:33:21.28445912 +0000 UTC m=+887.367947335" watchObservedRunningTime="2026-01-22 05:33:21.285154802 +0000 UTC m=+887.368643017" Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.320161 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv" podStartSLOduration=8.396086313 podStartE2EDuration="43.320146048s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.601734233 +0000 UTC m=+846.685222448" lastFinishedPulling="2026-01-22 05:33:15.525793968 +0000 UTC m=+881.609282183" observedRunningTime="2026-01-22 05:33:21.318327312 +0000 UTC m=+887.401815527" watchObservedRunningTime="2026-01-22 05:33:21.320146048 +0000 UTC m=+887.403634263" Jan 22 05:33:21 crc kubenswrapper[4814]: I0122 05:33:21.654693 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:33:22 crc kubenswrapper[4814]: I0122 05:33:22.062647 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2" event={"ID":"10ebf9ef-4973-4395-8b4d-d916626341ee","Type":"ContainerStarted","Data":"04e7f2488196de92e2ea609dd13725e8bbe06d036c0444796c0da12289af351c"} Jan 22 05:33:22 crc kubenswrapper[4814]: I0122 05:33:22.063681 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2" Jan 22 05:33:22 crc kubenswrapper[4814]: I0122 05:33:22.081689 4814 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2" podStartSLOduration=2.939812708 podStartE2EDuration="44.08167458s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.578421729 +0000 UTC m=+846.661909944" lastFinishedPulling="2026-01-22 05:33:21.720283591 +0000 UTC m=+887.803771816" observedRunningTime="2026-01-22 05:33:22.077915803 +0000 UTC m=+888.161404018" watchObservedRunningTime="2026-01-22 05:33:22.08167458 +0000 UTC m=+888.165162795" Jan 22 05:33:25 crc kubenswrapper[4814]: I0122 05:33:25.090490 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h7fd2" event={"ID":"8455e598-45fe-4009-b653-fc98f17baf96","Type":"ContainerStarted","Data":"adee97d1c67bfa95b9209770c4bd34f3b52b6161772f4ef30ac1859a94477174"} Jan 22 05:33:25 crc kubenswrapper[4814]: I0122 05:33:25.092395 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" event={"ID":"2f5ca5f9-39ed-4cf3-b4e4-8cc42d707e9a","Type":"ContainerStarted","Data":"80e10a524c8ad004ff615df77c39cb4747fb889c4a53da042f65f8ce6f825d75"} Jan 22 05:33:25 crc kubenswrapper[4814]: I0122 05:33:25.093142 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:33:25 crc kubenswrapper[4814]: I0122 05:33:25.095029 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6" event={"ID":"668a0682-ff7d-4141-9b7b-1a6fd8f6eb28","Type":"ContainerStarted","Data":"5f9dc4c672ea0b126124aa4c2b110224b964d21522f69a41c2e6eb4d493ff1be"} Jan 22 05:33:25 crc kubenswrapper[4814]: I0122 05:33:25.095405 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6" Jan 22 05:33:25 crc kubenswrapper[4814]: I0122 05:33:25.097530 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9zrv4" event={"ID":"7367f220-f4f6-471b-9ae0-6aa5adda56be","Type":"ContainerStarted","Data":"8e73b8021c0914cf92be69f272512a51af214b4fd8221b91b598fbb8fa559193"} Jan 22 05:33:25 crc kubenswrapper[4814]: I0122 05:33:25.099135 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" event={"ID":"e0bcc5d5-4473-4c8b-864a-ff88a0f75595","Type":"ContainerStarted","Data":"c74200853e97196f3dc0a9dc436af937347bac54209a03971839751a9ff596a0"} Jan 22 05:33:25 crc kubenswrapper[4814]: I0122 05:33:25.099296 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:33:25 crc kubenswrapper[4814]: I0122 05:33:25.157347 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" podStartSLOduration=39.287045018 podStartE2EDuration="47.157330667s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:33:16.105777544 +0000 UTC m=+882.189265759" lastFinishedPulling="2026-01-22 05:33:23.976063153 +0000 UTC m=+890.059551408" observedRunningTime="2026-01-22 05:33:25.1526046 +0000 UTC m=+891.236092865" watchObservedRunningTime="2026-01-22 05:33:25.157330667 +0000 UTC m=+891.240818882" Jan 22 
05:33:25 crc kubenswrapper[4814]: I0122 05:33:25.198555 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6" podStartSLOduration=3.804967949 podStartE2EDuration="47.198538596s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.590829565 +0000 UTC m=+846.674317780" lastFinishedPulling="2026-01-22 05:33:23.984400212 +0000 UTC m=+890.067888427" observedRunningTime="2026-01-22 05:33:25.194267433 +0000 UTC m=+891.277755648" watchObservedRunningTime="2026-01-22 05:33:25.198538596 +0000 UTC m=+891.282026801" Jan 22 05:33:25 crc kubenswrapper[4814]: I0122 05:33:25.222687 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" podStartSLOduration=39.528530494 podStartE2EDuration="47.222671835s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:33:16.289546519 +0000 UTC m=+882.373034744" lastFinishedPulling="2026-01-22 05:33:23.98368787 +0000 UTC m=+890.067176085" observedRunningTime="2026-01-22 05:33:25.219760165 +0000 UTC m=+891.303248380" watchObservedRunningTime="2026-01-22 05:33:25.222671835 +0000 UTC m=+891.306160050" Jan 22 05:33:26 crc kubenswrapper[4814]: I0122 05:33:26.107046 4814 generic.go:334] "Generic (PLEG): container finished" podID="7367f220-f4f6-471b-9ae0-6aa5adda56be" containerID="8e73b8021c0914cf92be69f272512a51af214b4fd8221b91b598fbb8fa559193" exitCode=0 Jan 22 05:33:26 crc kubenswrapper[4814]: I0122 05:33:26.107098 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9zrv4" event={"ID":"7367f220-f4f6-471b-9ae0-6aa5adda56be","Type":"ContainerDied","Data":"8e73b8021c0914cf92be69f272512a51af214b4fd8221b91b598fbb8fa559193"} Jan 22 05:33:26 crc kubenswrapper[4814]: I0122 05:33:26.110937 4814 generic.go:334] "Generic (PLEG): container finished" podID="8455e598-45fe-4009-b653-fc98f17baf96" containerID="adee97d1c67bfa95b9209770c4bd34f3b52b6161772f4ef30ac1859a94477174" exitCode=0 Jan 22 05:33:26 crc kubenswrapper[4814]: I0122 05:33:26.111171 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h7fd2" event={"ID":"8455e598-45fe-4009-b653-fc98f17baf96","Type":"ContainerDied","Data":"adee97d1c67bfa95b9209770c4bd34f3b52b6161772f4ef30ac1859a94477174"} Jan 22 05:33:27 crc kubenswrapper[4814]: I0122 05:33:27.119139 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj" event={"ID":"c71d95dc-90cc-4d59-85b1-5e43c670c034","Type":"ContainerStarted","Data":"d6474791e6d83ed324627fce2897560eda1a16c4a625babc5fb774d0ef6fe0ed"} Jan 22 05:33:27 crc kubenswrapper[4814]: I0122 05:33:27.119615 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj" Jan 22 05:33:27 crc kubenswrapper[4814]: I0122 05:33:27.122617 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9zrv4" event={"ID":"7367f220-f4f6-471b-9ae0-6aa5adda56be","Type":"ContainerStarted","Data":"8f2384f0f36f6204403709d21e11e6f9aeef588bd26b787e0a109c8c7addcba7"} Jan 22 05:33:27 crc kubenswrapper[4814]: I0122 05:33:27.125023 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h7fd2" 
event={"ID":"8455e598-45fe-4009-b653-fc98f17baf96","Type":"ContainerStarted","Data":"fe3b4bd974b0fa2b4c0d1d9091795407622ba7c2d7184f236ef9a94c1ce7feaa"} Jan 22 05:33:27 crc kubenswrapper[4814]: I0122 05:33:27.136933 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj" podStartSLOduration=3.117820005 podStartE2EDuration="49.136911994s" podCreationTimestamp="2026-01-22 05:32:38 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.750490942 +0000 UTC m=+846.833979157" lastFinishedPulling="2026-01-22 05:33:26.769582931 +0000 UTC m=+892.853071146" observedRunningTime="2026-01-22 05:33:27.134384056 +0000 UTC m=+893.217872271" watchObservedRunningTime="2026-01-22 05:33:27.136911994 +0000 UTC m=+893.220400209" Jan 22 05:33:27 crc kubenswrapper[4814]: I0122 05:33:27.158500 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9zrv4" podStartSLOduration=2.683895601 podStartE2EDuration="8.158480534s" podCreationTimestamp="2026-01-22 05:33:19 +0000 UTC" firstStartedPulling="2026-01-22 05:33:21.049223207 +0000 UTC m=+887.132711422" lastFinishedPulling="2026-01-22 05:33:26.52380813 +0000 UTC m=+892.607296355" observedRunningTime="2026-01-22 05:33:27.15286022 +0000 UTC m=+893.236348455" watchObservedRunningTime="2026-01-22 05:33:27.158480534 +0000 UTC m=+893.241968759" Jan 22 05:33:27 crc kubenswrapper[4814]: I0122 05:33:27.175922 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-h7fd2" podStartSLOduration=6.651882287 podStartE2EDuration="12.175900545s" podCreationTimestamp="2026-01-22 05:33:15 +0000 UTC" firstStartedPulling="2026-01-22 05:33:21.056016018 +0000 UTC m=+887.139504233" lastFinishedPulling="2026-01-22 05:33:26.580034266 +0000 UTC m=+892.663522491" observedRunningTime="2026-01-22 05:33:27.173721137 +0000 UTC m=+893.257209352" watchObservedRunningTime="2026-01-22 05:33:27.175900545 +0000 UTC m=+893.259388770" Jan 22 05:33:28 crc kubenswrapper[4814]: I0122 05:33:28.814892 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-wl699" Jan 22 05:33:28 crc kubenswrapper[4814]: I0122 05:33:28.845621 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-vrv59" Jan 22 05:33:28 crc kubenswrapper[4814]: I0122 05:33:28.850620 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mtt48" Jan 22 05:33:28 crc kubenswrapper[4814]: I0122 05:33:28.908462 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv" Jan 22 05:33:28 crc kubenswrapper[4814]: I0122 05:33:28.909815 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-8wkcr" Jan 22 05:33:28 crc kubenswrapper[4814]: I0122 05:33:28.916205 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-nmspv" Jan 22 05:33:28 crc kubenswrapper[4814]: I0122 05:33:28.968448 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-d65cp" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.061060 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-4csc6" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.111826 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.114028 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-28z6x" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.150328 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-hc6fj" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.209230 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-lr89d" Jan 22 05:33:29 crc kubenswrapper[4814]: E0122 05:33:29.346299 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr" podUID="1327ad33-b223-4c96-9b96-a20816f50f4d" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.407783 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-qr7k2" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.428790 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-pzbz4" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.493752 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-zlp6t" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.665531 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.667955 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-v45zc" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.684694 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.686343 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-czd57" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.711601 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.713528 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-pgjk5" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.743094 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.747079 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-hprbt" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.976245 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm" Jan 22 05:33:29 crc kubenswrapper[4814]: I0122 05:33:29.978571 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-glzvm" Jan 22 05:33:30 crc kubenswrapper[4814]: I0122 05:33:30.023301 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:30 crc kubenswrapper[4814]: I0122 05:33:30.023342 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:30 crc kubenswrapper[4814]: I0122 05:33:30.059877 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:30 crc kubenswrapper[4814]: I0122 05:33:30.840549 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-27czn" Jan 22 05:33:31 crc kubenswrapper[4814]: I0122 05:33:31.664285 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-58546c67cc-hknfz" Jan 22 05:33:35 crc kubenswrapper[4814]: I0122 05:33:35.223092 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b85446lnr" Jan 22 05:33:36 crc kubenswrapper[4814]: I0122 05:33:36.188834 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:36 crc kubenswrapper[4814]: I0122 05:33:36.188925 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:36 crc kubenswrapper[4814]: I0122 05:33:36.240166 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:37 crc kubenswrapper[4814]: I0122 05:33:37.251000 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:37 crc kubenswrapper[4814]: I0122 05:33:37.342131 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-h7fd2"] Jan 22 05:33:39 crc kubenswrapper[4814]: I0122 05:33:39.223258 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-h7fd2" podUID="8455e598-45fe-4009-b653-fc98f17baf96" containerName="registry-server" containerID="cri-o://fe3b4bd974b0fa2b4c0d1d9091795407622ba7c2d7184f236ef9a94c1ce7feaa" gracePeriod=2 Jan 22 05:33:39 crc kubenswrapper[4814]: I0122 05:33:39.534436 4814 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-ht9kj" Jan 22 05:33:39 crc kubenswrapper[4814]: I0122 05:33:39.694150 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:39 crc kubenswrapper[4814]: I0122 05:33:39.779235 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8455e598-45fe-4009-b653-fc98f17baf96-utilities\") pod \"8455e598-45fe-4009-b653-fc98f17baf96\" (UID: \"8455e598-45fe-4009-b653-fc98f17baf96\") " Jan 22 05:33:39 crc kubenswrapper[4814]: I0122 05:33:39.780343 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8455e598-45fe-4009-b653-fc98f17baf96-utilities" (OuterVolumeSpecName: "utilities") pod "8455e598-45fe-4009-b653-fc98f17baf96" (UID: "8455e598-45fe-4009-b653-fc98f17baf96"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:33:39 crc kubenswrapper[4814]: I0122 05:33:39.780597 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhvqf\" (UniqueName: \"kubernetes.io/projected/8455e598-45fe-4009-b653-fc98f17baf96-kube-api-access-nhvqf\") pod \"8455e598-45fe-4009-b653-fc98f17baf96\" (UID: \"8455e598-45fe-4009-b653-fc98f17baf96\") " Jan 22 05:33:39 crc kubenswrapper[4814]: I0122 05:33:39.781622 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8455e598-45fe-4009-b653-fc98f17baf96-catalog-content\") pod \"8455e598-45fe-4009-b653-fc98f17baf96\" (UID: \"8455e598-45fe-4009-b653-fc98f17baf96\") " Jan 22 05:33:39 crc kubenswrapper[4814]: I0122 05:33:39.782082 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8455e598-45fe-4009-b653-fc98f17baf96-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:33:39 crc kubenswrapper[4814]: I0122 05:33:39.786202 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8455e598-45fe-4009-b653-fc98f17baf96-kube-api-access-nhvqf" (OuterVolumeSpecName: "kube-api-access-nhvqf") pod "8455e598-45fe-4009-b653-fc98f17baf96" (UID: "8455e598-45fe-4009-b653-fc98f17baf96"). InnerVolumeSpecName "kube-api-access-nhvqf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:33:39 crc kubenswrapper[4814]: I0122 05:33:39.849313 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8455e598-45fe-4009-b653-fc98f17baf96-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8455e598-45fe-4009-b653-fc98f17baf96" (UID: "8455e598-45fe-4009-b653-fc98f17baf96"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:33:39 crc kubenswrapper[4814]: I0122 05:33:39.883850 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhvqf\" (UniqueName: \"kubernetes.io/projected/8455e598-45fe-4009-b653-fc98f17baf96-kube-api-access-nhvqf\") on node \"crc\" DevicePath \"\"" Jan 22 05:33:39 crc kubenswrapper[4814]: I0122 05:33:39.883890 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8455e598-45fe-4009-b653-fc98f17baf96-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.080481 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.127325 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9zrv4"] Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.231991 4814 generic.go:334] "Generic (PLEG): container finished" podID="8455e598-45fe-4009-b653-fc98f17baf96" containerID="fe3b4bd974b0fa2b4c0d1d9091795407622ba7c2d7184f236ef9a94c1ce7feaa" exitCode=0 Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.232047 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h7fd2" event={"ID":"8455e598-45fe-4009-b653-fc98f17baf96","Type":"ContainerDied","Data":"fe3b4bd974b0fa2b4c0d1d9091795407622ba7c2d7184f236ef9a94c1ce7feaa"} Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.232095 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h7fd2" event={"ID":"8455e598-45fe-4009-b653-fc98f17baf96","Type":"ContainerDied","Data":"f4fc235afd254bec61fcedfd7da1a4fc87f52e90c36aeed582b34e1fe0c0ce8a"} Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.232101 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-h7fd2" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.232119 4814 scope.go:117] "RemoveContainer" containerID="fe3b4bd974b0fa2b4c0d1d9091795407622ba7c2d7184f236ef9a94c1ce7feaa" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.232188 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9zrv4" podUID="7367f220-f4f6-471b-9ae0-6aa5adda56be" containerName="registry-server" containerID="cri-o://8f2384f0f36f6204403709d21e11e6f9aeef588bd26b787e0a109c8c7addcba7" gracePeriod=2 Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.262336 4814 scope.go:117] "RemoveContainer" containerID="adee97d1c67bfa95b9209770c4bd34f3b52b6161772f4ef30ac1859a94477174" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.279611 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-h7fd2"] Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.283821 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-h7fd2"] Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.297195 4814 scope.go:117] "RemoveContainer" containerID="ceff3bea245ccbdfae5b3add004d74747d31ff5677a6910e7768037baaf7208f" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.360957 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8455e598-45fe-4009-b653-fc98f17baf96" path="/var/lib/kubelet/pods/8455e598-45fe-4009-b653-fc98f17baf96/volumes" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.398721 4814 scope.go:117] "RemoveContainer" containerID="fe3b4bd974b0fa2b4c0d1d9091795407622ba7c2d7184f236ef9a94c1ce7feaa" Jan 22 05:33:40 crc kubenswrapper[4814]: E0122 05:33:40.399339 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe3b4bd974b0fa2b4c0d1d9091795407622ba7c2d7184f236ef9a94c1ce7feaa\": container with ID starting with fe3b4bd974b0fa2b4c0d1d9091795407622ba7c2d7184f236ef9a94c1ce7feaa not found: ID does not exist" containerID="fe3b4bd974b0fa2b4c0d1d9091795407622ba7c2d7184f236ef9a94c1ce7feaa" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.399366 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe3b4bd974b0fa2b4c0d1d9091795407622ba7c2d7184f236ef9a94c1ce7feaa"} err="failed to get container status \"fe3b4bd974b0fa2b4c0d1d9091795407622ba7c2d7184f236ef9a94c1ce7feaa\": rpc error: code = NotFound desc = could not find container \"fe3b4bd974b0fa2b4c0d1d9091795407622ba7c2d7184f236ef9a94c1ce7feaa\": container with ID starting with fe3b4bd974b0fa2b4c0d1d9091795407622ba7c2d7184f236ef9a94c1ce7feaa not found: ID does not exist" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.399391 4814 scope.go:117] "RemoveContainer" containerID="adee97d1c67bfa95b9209770c4bd34f3b52b6161772f4ef30ac1859a94477174" Jan 22 05:33:40 crc kubenswrapper[4814]: E0122 05:33:40.399834 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"adee97d1c67bfa95b9209770c4bd34f3b52b6161772f4ef30ac1859a94477174\": container with ID starting with adee97d1c67bfa95b9209770c4bd34f3b52b6161772f4ef30ac1859a94477174 not found: ID does not exist" containerID="adee97d1c67bfa95b9209770c4bd34f3b52b6161772f4ef30ac1859a94477174" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.399860 4814 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"adee97d1c67bfa95b9209770c4bd34f3b52b6161772f4ef30ac1859a94477174"} err="failed to get container status \"adee97d1c67bfa95b9209770c4bd34f3b52b6161772f4ef30ac1859a94477174\": rpc error: code = NotFound desc = could not find container \"adee97d1c67bfa95b9209770c4bd34f3b52b6161772f4ef30ac1859a94477174\": container with ID starting with adee97d1c67bfa95b9209770c4bd34f3b52b6161772f4ef30ac1859a94477174 not found: ID does not exist" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.399876 4814 scope.go:117] "RemoveContainer" containerID="ceff3bea245ccbdfae5b3add004d74747d31ff5677a6910e7768037baaf7208f" Jan 22 05:33:40 crc kubenswrapper[4814]: E0122 05:33:40.400255 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ceff3bea245ccbdfae5b3add004d74747d31ff5677a6910e7768037baaf7208f\": container with ID starting with ceff3bea245ccbdfae5b3add004d74747d31ff5677a6910e7768037baaf7208f not found: ID does not exist" containerID="ceff3bea245ccbdfae5b3add004d74747d31ff5677a6910e7768037baaf7208f" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.400282 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceff3bea245ccbdfae5b3add004d74747d31ff5677a6910e7768037baaf7208f"} err="failed to get container status \"ceff3bea245ccbdfae5b3add004d74747d31ff5677a6910e7768037baaf7208f\": rpc error: code = NotFound desc = could not find container \"ceff3bea245ccbdfae5b3add004d74747d31ff5677a6910e7768037baaf7208f\": container with ID starting with ceff3bea245ccbdfae5b3add004d74747d31ff5677a6910e7768037baaf7208f not found: ID does not exist" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.597760 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.692032 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7367f220-f4f6-471b-9ae0-6aa5adda56be-catalog-content\") pod \"7367f220-f4f6-471b-9ae0-6aa5adda56be\" (UID: \"7367f220-f4f6-471b-9ae0-6aa5adda56be\") " Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.692087 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56x7w\" (UniqueName: \"kubernetes.io/projected/7367f220-f4f6-471b-9ae0-6aa5adda56be-kube-api-access-56x7w\") pod \"7367f220-f4f6-471b-9ae0-6aa5adda56be\" (UID: \"7367f220-f4f6-471b-9ae0-6aa5adda56be\") " Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.692148 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7367f220-f4f6-471b-9ae0-6aa5adda56be-utilities\") pod \"7367f220-f4f6-471b-9ae0-6aa5adda56be\" (UID: \"7367f220-f4f6-471b-9ae0-6aa5adda56be\") " Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.693131 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7367f220-f4f6-471b-9ae0-6aa5adda56be-utilities" (OuterVolumeSpecName: "utilities") pod "7367f220-f4f6-471b-9ae0-6aa5adda56be" (UID: "7367f220-f4f6-471b-9ae0-6aa5adda56be"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.700865 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7367f220-f4f6-471b-9ae0-6aa5adda56be-kube-api-access-56x7w" (OuterVolumeSpecName: "kube-api-access-56x7w") pod "7367f220-f4f6-471b-9ae0-6aa5adda56be" (UID: "7367f220-f4f6-471b-9ae0-6aa5adda56be"). InnerVolumeSpecName "kube-api-access-56x7w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.745969 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7367f220-f4f6-471b-9ae0-6aa5adda56be-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7367f220-f4f6-471b-9ae0-6aa5adda56be" (UID: "7367f220-f4f6-471b-9ae0-6aa5adda56be"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.794054 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7367f220-f4f6-471b-9ae0-6aa5adda56be-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.794098 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-56x7w\" (UniqueName: \"kubernetes.io/projected/7367f220-f4f6-471b-9ae0-6aa5adda56be-kube-api-access-56x7w\") on node \"crc\" DevicePath \"\"" Jan 22 05:33:40 crc kubenswrapper[4814]: I0122 05:33:40.794110 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7367f220-f4f6-471b-9ae0-6aa5adda56be-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.240869 4814 generic.go:334] "Generic (PLEG): container finished" podID="7367f220-f4f6-471b-9ae0-6aa5adda56be" containerID="8f2384f0f36f6204403709d21e11e6f9aeef588bd26b787e0a109c8c7addcba7" exitCode=0 Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.240937 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9zrv4" event={"ID":"7367f220-f4f6-471b-9ae0-6aa5adda56be","Type":"ContainerDied","Data":"8f2384f0f36f6204403709d21e11e6f9aeef588bd26b787e0a109c8c7addcba7"} Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.240979 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9zrv4" Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.241018 4814 scope.go:117] "RemoveContainer" containerID="8f2384f0f36f6204403709d21e11e6f9aeef588bd26b787e0a109c8c7addcba7" Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.241000 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9zrv4" event={"ID":"7367f220-f4f6-471b-9ae0-6aa5adda56be","Type":"ContainerDied","Data":"13a297deef451747ba4babbb25f5c4312f79e5fd9faedf0a5c120d795959eec2"} Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.262759 4814 scope.go:117] "RemoveContainer" containerID="8e73b8021c0914cf92be69f272512a51af214b4fd8221b91b598fbb8fa559193" Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.286096 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9zrv4"] Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.296286 4814 scope.go:117] "RemoveContainer" containerID="03286b778fed3b1da11c72018eb63e1bb9d88c021978adbeffdd8959422e61a2" Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.313232 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9zrv4"] Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.328071 4814 scope.go:117] "RemoveContainer" containerID="8f2384f0f36f6204403709d21e11e6f9aeef588bd26b787e0a109c8c7addcba7" Jan 22 05:33:41 crc kubenswrapper[4814]: E0122 05:33:41.329063 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f2384f0f36f6204403709d21e11e6f9aeef588bd26b787e0a109c8c7addcba7\": container with ID starting with 8f2384f0f36f6204403709d21e11e6f9aeef588bd26b787e0a109c8c7addcba7 not found: ID does not exist" containerID="8f2384f0f36f6204403709d21e11e6f9aeef588bd26b787e0a109c8c7addcba7" Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.329121 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f2384f0f36f6204403709d21e11e6f9aeef588bd26b787e0a109c8c7addcba7"} err="failed to get container status \"8f2384f0f36f6204403709d21e11e6f9aeef588bd26b787e0a109c8c7addcba7\": rpc error: code = NotFound desc = could not find container \"8f2384f0f36f6204403709d21e11e6f9aeef588bd26b787e0a109c8c7addcba7\": container with ID starting with 8f2384f0f36f6204403709d21e11e6f9aeef588bd26b787e0a109c8c7addcba7 not found: ID does not exist" Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.329147 4814 scope.go:117] "RemoveContainer" containerID="8e73b8021c0914cf92be69f272512a51af214b4fd8221b91b598fbb8fa559193" Jan 22 05:33:41 crc kubenswrapper[4814]: E0122 05:33:41.329461 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e73b8021c0914cf92be69f272512a51af214b4fd8221b91b598fbb8fa559193\": container with ID starting with 8e73b8021c0914cf92be69f272512a51af214b4fd8221b91b598fbb8fa559193 not found: ID does not exist" containerID="8e73b8021c0914cf92be69f272512a51af214b4fd8221b91b598fbb8fa559193" Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.329484 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e73b8021c0914cf92be69f272512a51af214b4fd8221b91b598fbb8fa559193"} err="failed to get container status \"8e73b8021c0914cf92be69f272512a51af214b4fd8221b91b598fbb8fa559193\": rpc error: code = NotFound desc = could not find 
container \"8e73b8021c0914cf92be69f272512a51af214b4fd8221b91b598fbb8fa559193\": container with ID starting with 8e73b8021c0914cf92be69f272512a51af214b4fd8221b91b598fbb8fa559193 not found: ID does not exist" Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.329497 4814 scope.go:117] "RemoveContainer" containerID="03286b778fed3b1da11c72018eb63e1bb9d88c021978adbeffdd8959422e61a2" Jan 22 05:33:41 crc kubenswrapper[4814]: E0122 05:33:41.329918 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03286b778fed3b1da11c72018eb63e1bb9d88c021978adbeffdd8959422e61a2\": container with ID starting with 03286b778fed3b1da11c72018eb63e1bb9d88c021978adbeffdd8959422e61a2 not found: ID does not exist" containerID="03286b778fed3b1da11c72018eb63e1bb9d88c021978adbeffdd8959422e61a2" Jan 22 05:33:41 crc kubenswrapper[4814]: I0122 05:33:41.329938 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03286b778fed3b1da11c72018eb63e1bb9d88c021978adbeffdd8959422e61a2"} err="failed to get container status \"03286b778fed3b1da11c72018eb63e1bb9d88c021978adbeffdd8959422e61a2\": rpc error: code = NotFound desc = could not find container \"03286b778fed3b1da11c72018eb63e1bb9d88c021978adbeffdd8959422e61a2\": container with ID starting with 03286b778fed3b1da11c72018eb63e1bb9d88c021978adbeffdd8959422e61a2 not found: ID does not exist" Jan 22 05:33:42 crc kubenswrapper[4814]: I0122 05:33:42.359213 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7367f220-f4f6-471b-9ae0-6aa5adda56be" path="/var/lib/kubelet/pods/7367f220-f4f6-471b-9ae0-6aa5adda56be/volumes" Jan 22 05:33:44 crc kubenswrapper[4814]: I0122 05:33:44.273081 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr" event={"ID":"1327ad33-b223-4c96-9b96-a20816f50f4d","Type":"ContainerStarted","Data":"f7f1b9228cd2d3da04fb94d9d2e1f5dd308da98a1532491b27fadd2fd592c85f"} Jan 22 05:33:44 crc kubenswrapper[4814]: I0122 05:33:44.300289 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lvlrr" podStartSLOduration=2.385461524 podStartE2EDuration="1m5.300263278s" podCreationTimestamp="2026-01-22 05:32:39 +0000 UTC" firstStartedPulling="2026-01-22 05:32:40.891528359 +0000 UTC m=+846.975016574" lastFinishedPulling="2026-01-22 05:33:43.806330093 +0000 UTC m=+909.889818328" observedRunningTime="2026-01-22 05:33:44.294995084 +0000 UTC m=+910.378483339" watchObservedRunningTime="2026-01-22 05:33:44.300263278 +0000 UTC m=+910.383751513" Jan 22 05:33:49 crc kubenswrapper[4814]: I0122 05:33:49.614530 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:33:49 crc kubenswrapper[4814]: I0122 05:33:49.615260 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.199402 4814 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-vcxc7"] Jan 22 05:34:01 crc kubenswrapper[4814]: E0122 05:34:01.200091 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8455e598-45fe-4009-b653-fc98f17baf96" containerName="registry-server" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.200103 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="8455e598-45fe-4009-b653-fc98f17baf96" containerName="registry-server" Jan 22 05:34:01 crc kubenswrapper[4814]: E0122 05:34:01.200114 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7367f220-f4f6-471b-9ae0-6aa5adda56be" containerName="registry-server" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.200120 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7367f220-f4f6-471b-9ae0-6aa5adda56be" containerName="registry-server" Jan 22 05:34:01 crc kubenswrapper[4814]: E0122 05:34:01.200133 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7367f220-f4f6-471b-9ae0-6aa5adda56be" containerName="extract-content" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.200139 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7367f220-f4f6-471b-9ae0-6aa5adda56be" containerName="extract-content" Jan 22 05:34:01 crc kubenswrapper[4814]: E0122 05:34:01.200148 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8455e598-45fe-4009-b653-fc98f17baf96" containerName="extract-utilities" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.200154 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="8455e598-45fe-4009-b653-fc98f17baf96" containerName="extract-utilities" Jan 22 05:34:01 crc kubenswrapper[4814]: E0122 05:34:01.200163 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7367f220-f4f6-471b-9ae0-6aa5adda56be" containerName="extract-utilities" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.200169 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7367f220-f4f6-471b-9ae0-6aa5adda56be" containerName="extract-utilities" Jan 22 05:34:01 crc kubenswrapper[4814]: E0122 05:34:01.200186 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8455e598-45fe-4009-b653-fc98f17baf96" containerName="extract-content" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.200192 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="8455e598-45fe-4009-b653-fc98f17baf96" containerName="extract-content" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.200301 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="7367f220-f4f6-471b-9ae0-6aa5adda56be" containerName="registry-server" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.200311 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="8455e598-45fe-4009-b653-fc98f17baf96" containerName="registry-server" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.200963 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-vcxc7" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.208691 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.208863 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.208980 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.209115 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-tb6s4" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.223475 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-vcxc7"] Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.312403 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-mw4fc"] Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.313600 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.324713 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.340981 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-mw4fc"] Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.387038 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hs8j\" (UniqueName: \"kubernetes.io/projected/1b95ccce-6e2f-48be-8ff4-7a107496b6f4-kube-api-access-8hs8j\") pod \"dnsmasq-dns-675f4bcbfc-vcxc7\" (UID: \"1b95ccce-6e2f-48be-8ff4-7a107496b6f4\") " pod="openstack/dnsmasq-dns-675f4bcbfc-vcxc7" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.387088 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b95ccce-6e2f-48be-8ff4-7a107496b6f4-config\") pod \"dnsmasq-dns-675f4bcbfc-vcxc7\" (UID: \"1b95ccce-6e2f-48be-8ff4-7a107496b6f4\") " pod="openstack/dnsmasq-dns-675f4bcbfc-vcxc7" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.488516 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hs8j\" (UniqueName: \"kubernetes.io/projected/1b95ccce-6e2f-48be-8ff4-7a107496b6f4-kube-api-access-8hs8j\") pod \"dnsmasq-dns-675f4bcbfc-vcxc7\" (UID: \"1b95ccce-6e2f-48be-8ff4-7a107496b6f4\") " pod="openstack/dnsmasq-dns-675f4bcbfc-vcxc7" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.488557 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b95ccce-6e2f-48be-8ff4-7a107496b6f4-config\") pod \"dnsmasq-dns-675f4bcbfc-vcxc7\" (UID: \"1b95ccce-6e2f-48be-8ff4-7a107496b6f4\") " pod="openstack/dnsmasq-dns-675f4bcbfc-vcxc7" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.488610 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-mw4fc\" (UID: \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" Jan 22 05:34:01 
crc kubenswrapper[4814]: I0122 05:34:01.488653 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnxrh\" (UniqueName: \"kubernetes.io/projected/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-kube-api-access-wnxrh\") pod \"dnsmasq-dns-78dd6ddcc-mw4fc\" (UID: \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.488752 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-config\") pod \"dnsmasq-dns-78dd6ddcc-mw4fc\" (UID: \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.489619 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b95ccce-6e2f-48be-8ff4-7a107496b6f4-config\") pod \"dnsmasq-dns-675f4bcbfc-vcxc7\" (UID: \"1b95ccce-6e2f-48be-8ff4-7a107496b6f4\") " pod="openstack/dnsmasq-dns-675f4bcbfc-vcxc7" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.524772 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hs8j\" (UniqueName: \"kubernetes.io/projected/1b95ccce-6e2f-48be-8ff4-7a107496b6f4-kube-api-access-8hs8j\") pod \"dnsmasq-dns-675f4bcbfc-vcxc7\" (UID: \"1b95ccce-6e2f-48be-8ff4-7a107496b6f4\") " pod="openstack/dnsmasq-dns-675f4bcbfc-vcxc7" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.533009 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-vcxc7" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.590360 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-config\") pod \"dnsmasq-dns-78dd6ddcc-mw4fc\" (UID: \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.590635 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-mw4fc\" (UID: \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.590661 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnxrh\" (UniqueName: \"kubernetes.io/projected/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-kube-api-access-wnxrh\") pod \"dnsmasq-dns-78dd6ddcc-mw4fc\" (UID: \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.591258 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-config\") pod \"dnsmasq-dns-78dd6ddcc-mw4fc\" (UID: \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.591586 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-mw4fc\" (UID: 
\"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.613762 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnxrh\" (UniqueName: \"kubernetes.io/projected/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-kube-api-access-wnxrh\") pod \"dnsmasq-dns-78dd6ddcc-mw4fc\" (UID: \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\") " pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" Jan 22 05:34:01 crc kubenswrapper[4814]: I0122 05:34:01.629768 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" Jan 22 05:34:02 crc kubenswrapper[4814]: I0122 05:34:02.005683 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-vcxc7"] Jan 22 05:34:02 crc kubenswrapper[4814]: W0122 05:34:02.014763 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b95ccce_6e2f_48be_8ff4_7a107496b6f4.slice/crio-c39d65baf7cdb09aae7e468aaa087ce95f7f7e38c4f953fa1d431f1431af606a WatchSource:0}: Error finding container c39d65baf7cdb09aae7e468aaa087ce95f7f7e38c4f953fa1d431f1431af606a: Status 404 returned error can't find the container with id c39d65baf7cdb09aae7e468aaa087ce95f7f7e38c4f953fa1d431f1431af606a Jan 22 05:34:02 crc kubenswrapper[4814]: I0122 05:34:02.117505 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-mw4fc"] Jan 22 05:34:02 crc kubenswrapper[4814]: I0122 05:34:02.415769 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-vcxc7" event={"ID":"1b95ccce-6e2f-48be-8ff4-7a107496b6f4","Type":"ContainerStarted","Data":"c39d65baf7cdb09aae7e468aaa087ce95f7f7e38c4f953fa1d431f1431af606a"} Jan 22 05:34:02 crc kubenswrapper[4814]: I0122 05:34:02.416657 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" event={"ID":"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3","Type":"ContainerStarted","Data":"511b515a6c3d9ac18f659ccba4ec1866a3219268c0d3de575e3db0f781bf8902"} Jan 22 05:34:03 crc kubenswrapper[4814]: I0122 05:34:03.943179 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-vcxc7"] Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.017678 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7vwmk"] Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.018674 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.036591 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7vwmk"] Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.165287 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/637640fe-91d4-446a-92b5-bde8a7dea007-config\") pod \"dnsmasq-dns-666b6646f7-7vwmk\" (UID: \"637640fe-91d4-446a-92b5-bde8a7dea007\") " pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.165387 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlsvb\" (UniqueName: \"kubernetes.io/projected/637640fe-91d4-446a-92b5-bde8a7dea007-kube-api-access-wlsvb\") pod \"dnsmasq-dns-666b6646f7-7vwmk\" (UID: \"637640fe-91d4-446a-92b5-bde8a7dea007\") " pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.165425 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/637640fe-91d4-446a-92b5-bde8a7dea007-dns-svc\") pod \"dnsmasq-dns-666b6646f7-7vwmk\" (UID: \"637640fe-91d4-446a-92b5-bde8a7dea007\") " pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.266981 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlsvb\" (UniqueName: \"kubernetes.io/projected/637640fe-91d4-446a-92b5-bde8a7dea007-kube-api-access-wlsvb\") pod \"dnsmasq-dns-666b6646f7-7vwmk\" (UID: \"637640fe-91d4-446a-92b5-bde8a7dea007\") " pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.267037 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/637640fe-91d4-446a-92b5-bde8a7dea007-dns-svc\") pod \"dnsmasq-dns-666b6646f7-7vwmk\" (UID: \"637640fe-91d4-446a-92b5-bde8a7dea007\") " pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.267063 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/637640fe-91d4-446a-92b5-bde8a7dea007-config\") pod \"dnsmasq-dns-666b6646f7-7vwmk\" (UID: \"637640fe-91d4-446a-92b5-bde8a7dea007\") " pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.267851 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/637640fe-91d4-446a-92b5-bde8a7dea007-config\") pod \"dnsmasq-dns-666b6646f7-7vwmk\" (UID: \"637640fe-91d4-446a-92b5-bde8a7dea007\") " pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.268587 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/637640fe-91d4-446a-92b5-bde8a7dea007-dns-svc\") pod \"dnsmasq-dns-666b6646f7-7vwmk\" (UID: \"637640fe-91d4-446a-92b5-bde8a7dea007\") " pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.318408 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlsvb\" (UniqueName: 
\"kubernetes.io/projected/637640fe-91d4-446a-92b5-bde8a7dea007-kube-api-access-wlsvb\") pod \"dnsmasq-dns-666b6646f7-7vwmk\" (UID: \"637640fe-91d4-446a-92b5-bde8a7dea007\") " pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.370925 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.530086 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-mw4fc"] Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.544118 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-grh2t"] Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.545177 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.559451 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-grh2t"] Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.681414 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nvsq\" (UniqueName: \"kubernetes.io/projected/8c0804d9-4deb-4e4a-a30b-22babff055b2-kube-api-access-8nvsq\") pod \"dnsmasq-dns-57d769cc4f-grh2t\" (UID: \"8c0804d9-4deb-4e4a-a30b-22babff055b2\") " pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.681748 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c0804d9-4deb-4e4a-a30b-22babff055b2-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-grh2t\" (UID: \"8c0804d9-4deb-4e4a-a30b-22babff055b2\") " pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.681786 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c0804d9-4deb-4e4a-a30b-22babff055b2-config\") pod \"dnsmasq-dns-57d769cc4f-grh2t\" (UID: \"8c0804d9-4deb-4e4a-a30b-22babff055b2\") " pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.784951 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nvsq\" (UniqueName: \"kubernetes.io/projected/8c0804d9-4deb-4e4a-a30b-22babff055b2-kube-api-access-8nvsq\") pod \"dnsmasq-dns-57d769cc4f-grh2t\" (UID: \"8c0804d9-4deb-4e4a-a30b-22babff055b2\") " pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.785004 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c0804d9-4deb-4e4a-a30b-22babff055b2-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-grh2t\" (UID: \"8c0804d9-4deb-4e4a-a30b-22babff055b2\") " pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.785031 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c0804d9-4deb-4e4a-a30b-22babff055b2-config\") pod \"dnsmasq-dns-57d769cc4f-grh2t\" (UID: \"8c0804d9-4deb-4e4a-a30b-22babff055b2\") " pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.786146 4814 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c0804d9-4deb-4e4a-a30b-22babff055b2-config\") pod \"dnsmasq-dns-57d769cc4f-grh2t\" (UID: \"8c0804d9-4deb-4e4a-a30b-22babff055b2\") " pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.786987 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c0804d9-4deb-4e4a-a30b-22babff055b2-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-grh2t\" (UID: \"8c0804d9-4deb-4e4a-a30b-22babff055b2\") " pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.816263 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nvsq\" (UniqueName: \"kubernetes.io/projected/8c0804d9-4deb-4e4a-a30b-22babff055b2-kube-api-access-8nvsq\") pod \"dnsmasq-dns-57d769cc4f-grh2t\" (UID: \"8c0804d9-4deb-4e4a-a30b-22babff055b2\") " pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" Jan 22 05:34:04 crc kubenswrapper[4814]: I0122 05:34:04.921189 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.025839 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7vwmk"] Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.282273 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.283297 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.290536 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-qbc67" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.291022 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.291129 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.291220 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.291381 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.291487 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.292044 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.300456 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.410581 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-grh2t"] Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.416112 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " 
pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.416154 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/22c14c36-2eb5-424d-a919-25f2e99eeb44-pod-info\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.416175 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.416201 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.416220 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.416234 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.416769 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-996wr\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-kube-api-access-996wr\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.416926 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/22c14c36-2eb5-424d-a919-25f2e99eeb44-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.416970 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-config-data\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.417153 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 
05:34:05.417180 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-server-conf\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.520223 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.520269 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/22c14c36-2eb5-424d-a919-25f2e99eeb44-pod-info\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.520291 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.520317 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.520335 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.520348 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.520379 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-996wr\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-kube-api-access-996wr\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.520417 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/22c14c36-2eb5-424d-a919-25f2e99eeb44-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.520430 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-config-data\") pod \"rabbitmq-server-0\" 
(UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.520475 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.520490 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-server-conf\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.521601 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-server-conf\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.529862 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.530395 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.530644 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.531595 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.532120 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-config-data\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.532590 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/22c14c36-2eb5-424d-a919-25f2e99eeb44-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.533254 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.549985 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/22c14c36-2eb5-424d-a919-25f2e99eeb44-pod-info\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.561911 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" event={"ID":"8c0804d9-4deb-4e4a-a30b-22babff055b2","Type":"ContainerStarted","Data":"381f3953b87b4d56677ea401f9cb8a31e99d86f0afbd529324f4188b5464379d"} Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.563542 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" event={"ID":"637640fe-91d4-446a-92b5-bde8a7dea007","Type":"ContainerStarted","Data":"98a0a7ea5a5cc30ae2a75e6f8d0cfabccc2b8106918eeb18e7d6f98b4de8986c"} Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.564463 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-996wr\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-kube-api-access-996wr\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.573875 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.604954 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.710375 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.711431 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.715739 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.715812 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.715973 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.716091 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-4g4bt" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.716196 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.716364 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.716617 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.728149 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.828916 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/14a83f70-2b64-417d-a198-d51bb829cea1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.828966 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.828987 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.829056 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.829077 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/14a83f70-2b64-417d-a198-d51bb829cea1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.829096 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.829128 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.829144 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.829166 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.829615 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.829659 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcmmw\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-kube-api-access-jcmmw\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.910337 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.930822 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.930900 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.930927 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcmmw\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-kube-api-access-jcmmw\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.931027 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/14a83f70-2b64-417d-a198-d51bb829cea1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.931065 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.931101 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.931568 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.931667 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.931701 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/14a83f70-2b64-417d-a198-d51bb829cea1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.931726 4814 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.931775 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.931803 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.931841 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.932122 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.932412 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.933043 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.933639 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.940304 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/14a83f70-2b64-417d-a198-d51bb829cea1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.940689 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/14a83f70-2b64-417d-a198-d51bb829cea1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.942477 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.949915 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcmmw\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-kube-api-access-jcmmw\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.951915 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:05 crc kubenswrapper[4814]: I0122 05:34:05.966733 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.042993 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.454855 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.566123 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.616976 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"22c14c36-2eb5-424d-a919-25f2e99eeb44","Type":"ContainerStarted","Data":"bcdc77fa9c2fd1b955df4add3c676763ee516ec4baf474ad8b81872b63c01527"} Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.660891 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.668839 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.671598 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.675808 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.675899 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.676269 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.677875 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-8mq8f" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.685315 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.863070 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/575e010d-db55-494f-8f39-c492c2bb22c8-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.863117 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/575e010d-db55-494f-8f39-c492c2bb22c8-config-data-default\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.863341 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wtt5\" (UniqueName: \"kubernetes.io/projected/575e010d-db55-494f-8f39-c492c2bb22c8-kube-api-access-8wtt5\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.863421 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/575e010d-db55-494f-8f39-c492c2bb22c8-config-data-generated\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.863437 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/575e010d-db55-494f-8f39-c492c2bb22c8-operator-scripts\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.863467 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.863514 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/575e010d-db55-494f-8f39-c492c2bb22c8-kolla-config\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.863580 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/575e010d-db55-494f-8f39-c492c2bb22c8-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.965292 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/575e010d-db55-494f-8f39-c492c2bb22c8-config-data-generated\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.965554 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/575e010d-db55-494f-8f39-c492c2bb22c8-operator-scripts\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.965583 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.965610 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/575e010d-db55-494f-8f39-c492c2bb22c8-kolla-config\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.965640 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/575e010d-db55-494f-8f39-c492c2bb22c8-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.965668 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/575e010d-db55-494f-8f39-c492c2bb22c8-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.965686 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/575e010d-db55-494f-8f39-c492c2bb22c8-config-data-default\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.965718 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wtt5\" (UniqueName: \"kubernetes.io/projected/575e010d-db55-494f-8f39-c492c2bb22c8-kube-api-access-8wtt5\") pod \"openstack-galera-0\" (UID: 
\"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.966505 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/575e010d-db55-494f-8f39-c492c2bb22c8-config-data-generated\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.967231 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/575e010d-db55-494f-8f39-c492c2bb22c8-operator-scripts\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.969471 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/575e010d-db55-494f-8f39-c492c2bb22c8-config-data-default\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.969752 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.984203 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/575e010d-db55-494f-8f39-c492c2bb22c8-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:06 crc kubenswrapper[4814]: I0122 05:34:06.998124 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/575e010d-db55-494f-8f39-c492c2bb22c8-kolla-config\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:07 crc kubenswrapper[4814]: I0122 05:34:07.009650 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/575e010d-db55-494f-8f39-c492c2bb22c8-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:07 crc kubenswrapper[4814]: I0122 05:34:07.036161 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wtt5\" (UniqueName: \"kubernetes.io/projected/575e010d-db55-494f-8f39-c492c2bb22c8-kube-api-access-8wtt5\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:07 crc kubenswrapper[4814]: I0122 05:34:07.036865 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"575e010d-db55-494f-8f39-c492c2bb22c8\") " pod="openstack/openstack-galera-0" Jan 22 05:34:07 crc kubenswrapper[4814]: I0122 05:34:07.309071 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 22 05:34:07 crc kubenswrapper[4814]: I0122 05:34:07.624915 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"14a83f70-2b64-417d-a198-d51bb829cea1","Type":"ContainerStarted","Data":"ca62004ceebd500208f0b9728b4b066805c819f8709ef3164c876dd5cab34b62"} Jan 22 05:34:07 crc kubenswrapper[4814]: I0122 05:34:07.802815 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 22 05:34:07 crc kubenswrapper[4814]: W0122 05:34:07.826259 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod575e010d_db55_494f_8f39_c492c2bb22c8.slice/crio-e30d2e32329ea6a1781a95cc8192dc3e7554b17b7523f497d1fc58ae53de5bec WatchSource:0}: Error finding container e30d2e32329ea6a1781a95cc8192dc3e7554b17b7523f497d1fc58ae53de5bec: Status 404 returned error can't find the container with id e30d2e32329ea6a1781a95cc8192dc3e7554b17b7523f497d1fc58ae53de5bec Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.242755 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.243565 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.251896 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.252186 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.252325 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-rr7kt" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.255478 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.331525 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.333157 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.343998 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.344742 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-bnppz" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.344863 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.346661 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.400995 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.419807 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljw2f\" (UniqueName: \"kubernetes.io/projected/cb12b27a-d0b6-4f39-8795-9001d89527c1-kube-api-access-ljw2f\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.419850 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cb12b27a-d0b6-4f39-8795-9001d89527c1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.419966 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb12b27a-d0b6-4f39-8795-9001d89527c1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.420015 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cb12b27a-d0b6-4f39-8795-9001d89527c1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.420054 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cb12b27a-d0b6-4f39-8795-9001d89527c1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.420112 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb12b27a-d0b6-4f39-8795-9001d89527c1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.420152 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d42acabc-610a-444e-b480-ae5967f80f67-combined-ca-bundle\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.420188 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l95bq\" (UniqueName: \"kubernetes.io/projected/d42acabc-610a-444e-b480-ae5967f80f67-kube-api-access-l95bq\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.420216 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb12b27a-d0b6-4f39-8795-9001d89527c1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.420297 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d42acabc-610a-444e-b480-ae5967f80f67-config-data\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.420335 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.420413 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d42acabc-610a-444e-b480-ae5967f80f67-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.420446 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d42acabc-610a-444e-b480-ae5967f80f67-kolla-config\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.522207 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb12b27a-d0b6-4f39-8795-9001d89527c1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.522451 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cb12b27a-d0b6-4f39-8795-9001d89527c1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.522475 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cb12b27a-d0b6-4f39-8795-9001d89527c1-config-data-generated\") pod 
\"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.522513 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb12b27a-d0b6-4f39-8795-9001d89527c1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.522574 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d42acabc-610a-444e-b480-ae5967f80f67-combined-ca-bundle\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.522593 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l95bq\" (UniqueName: \"kubernetes.io/projected/d42acabc-610a-444e-b480-ae5967f80f67-kube-api-access-l95bq\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.522613 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb12b27a-d0b6-4f39-8795-9001d89527c1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.522657 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d42acabc-610a-444e-b480-ae5967f80f67-config-data\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.522679 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.522708 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d42acabc-610a-444e-b480-ae5967f80f67-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.522723 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d42acabc-610a-444e-b480-ae5967f80f67-kolla-config\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.522740 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljw2f\" (UniqueName: \"kubernetes.io/projected/cb12b27a-d0b6-4f39-8795-9001d89527c1-kube-api-access-ljw2f\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.522762 4814 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cb12b27a-d0b6-4f39-8795-9001d89527c1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.523477 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/cb12b27a-d0b6-4f39-8795-9001d89527c1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.523540 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cb12b27a-d0b6-4f39-8795-9001d89527c1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.524368 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.525237 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d42acabc-610a-444e-b480-ae5967f80f67-kolla-config\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.526256 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cb12b27a-d0b6-4f39-8795-9001d89527c1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.528427 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d42acabc-610a-444e-b480-ae5967f80f67-config-data\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.528472 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/cb12b27a-d0b6-4f39-8795-9001d89527c1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.531321 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb12b27a-d0b6-4f39-8795-9001d89527c1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.539473 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d42acabc-610a-444e-b480-ae5967f80f67-memcached-tls-certs\") pod \"memcached-0\" (UID: 
\"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.545268 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljw2f\" (UniqueName: \"kubernetes.io/projected/cb12b27a-d0b6-4f39-8795-9001d89527c1-kube-api-access-ljw2f\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.549401 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l95bq\" (UniqueName: \"kubernetes.io/projected/d42acabc-610a-444e-b480-ae5967f80f67-kube-api-access-l95bq\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.555139 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d42acabc-610a-444e-b480-ae5967f80f67-combined-ca-bundle\") pod \"memcached-0\" (UID: \"d42acabc-610a-444e-b480-ae5967f80f67\") " pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.576709 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.600236 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb12b27a-d0b6-4f39-8795-9001d89527c1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"cb12b27a-d0b6-4f39-8795-9001d89527c1\") " pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.646512 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"575e010d-db55-494f-8f39-c492c2bb22c8","Type":"ContainerStarted","Data":"e30d2e32329ea6a1781a95cc8192dc3e7554b17b7523f497d1fc58ae53de5bec"} Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.648041 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 22 05:34:08 crc kubenswrapper[4814]: I0122 05:34:08.665913 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:09 crc kubenswrapper[4814]: I0122 05:34:09.375937 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 22 05:34:09 crc kubenswrapper[4814]: I0122 05:34:09.400414 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 22 05:34:09 crc kubenswrapper[4814]: I0122 05:34:09.696418 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cb12b27a-d0b6-4f39-8795-9001d89527c1","Type":"ContainerStarted","Data":"a4dcc1d30428c7836dc3f3534c51cb369c11f1d9c0e176faa85bfbbcd264d457"} Jan 22 05:34:09 crc kubenswrapper[4814]: I0122 05:34:09.708560 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d42acabc-610a-444e-b480-ae5967f80f67","Type":"ContainerStarted","Data":"9373ee48bb5d25482ab53f40f76b786f83bc382936b973316c11a802f455dc45"} Jan 22 05:34:10 crc kubenswrapper[4814]: I0122 05:34:10.597852 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 05:34:10 crc kubenswrapper[4814]: I0122 05:34:10.601192 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 05:34:10 crc kubenswrapper[4814]: I0122 05:34:10.649298 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-4qwn4" Jan 22 05:34:10 crc kubenswrapper[4814]: I0122 05:34:10.658600 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hslm\" (UniqueName: \"kubernetes.io/projected/4ab6f947-7aad-4ca5-98d2-0803c62ed26d-kube-api-access-8hslm\") pod \"kube-state-metrics-0\" (UID: \"4ab6f947-7aad-4ca5-98d2-0803c62ed26d\") " pod="openstack/kube-state-metrics-0" Jan 22 05:34:10 crc kubenswrapper[4814]: I0122 05:34:10.668391 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 05:34:10 crc kubenswrapper[4814]: I0122 05:34:10.764962 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hslm\" (UniqueName: \"kubernetes.io/projected/4ab6f947-7aad-4ca5-98d2-0803c62ed26d-kube-api-access-8hslm\") pod \"kube-state-metrics-0\" (UID: \"4ab6f947-7aad-4ca5-98d2-0803c62ed26d\") " pod="openstack/kube-state-metrics-0" Jan 22 05:34:10 crc kubenswrapper[4814]: I0122 05:34:10.790754 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hslm\" (UniqueName: \"kubernetes.io/projected/4ab6f947-7aad-4ca5-98d2-0803c62ed26d-kube-api-access-8hslm\") pod \"kube-state-metrics-0\" (UID: \"4ab6f947-7aad-4ca5-98d2-0803c62ed26d\") " pod="openstack/kube-state-metrics-0" Jan 22 05:34:10 crc kubenswrapper[4814]: I0122 05:34:10.998289 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 05:34:11 crc kubenswrapper[4814]: I0122 05:34:11.640295 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 05:34:11 crc kubenswrapper[4814]: W0122 05:34:11.664575 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ab6f947_7aad_4ca5_98d2_0803c62ed26d.slice/crio-240782e9fd382f38c2a0195336ee82f980e7bc392bb01cfef6a58a68cca6ca6f WatchSource:0}: Error finding container 240782e9fd382f38c2a0195336ee82f980e7bc392bb01cfef6a58a68cca6ca6f: Status 404 returned error can't find the container with id 240782e9fd382f38c2a0195336ee82f980e7bc392bb01cfef6a58a68cca6ca6f Jan 22 05:34:11 crc kubenswrapper[4814]: I0122 05:34:11.738269 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4ab6f947-7aad-4ca5-98d2-0803c62ed26d","Type":"ContainerStarted","Data":"240782e9fd382f38c2a0195336ee82f980e7bc392bb01cfef6a58a68cca6ca6f"} Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.798988 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-5ll9n"] Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.800672 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.806193 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.806355 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-nl5l6" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.806535 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.819781 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-l9d6w"] Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.830410 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.866664 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5ll9n"] Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.879172 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-l9d6w"] Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.948565 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9c3c821-607f-4f2d-8b28-ae58bce1864d-scripts\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.948641 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c6078618-2314-44ee-97dc-76a10abffb9d-etc-ovs\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.948662 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84ffj\" (UniqueName: \"kubernetes.io/projected/c9c3c821-607f-4f2d-8b28-ae58bce1864d-kube-api-access-84ffj\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.948679 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c6078618-2314-44ee-97dc-76a10abffb9d-var-lib\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.948708 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9c3c821-607f-4f2d-8b28-ae58bce1864d-ovn-controller-tls-certs\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.948723 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c9c3c821-607f-4f2d-8b28-ae58bce1864d-var-run-ovn\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.948736 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9c3c821-607f-4f2d-8b28-ae58bce1864d-combined-ca-bundle\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.948752 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c9c3c821-607f-4f2d-8b28-ae58bce1864d-var-log-ovn\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:13 crc 
kubenswrapper[4814]: I0122 05:34:13.948765 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c6078618-2314-44ee-97dc-76a10abffb9d-scripts\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.948781 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c9c3c821-607f-4f2d-8b28-ae58bce1864d-var-run\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.948806 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c6078618-2314-44ee-97dc-76a10abffb9d-var-run\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.948822 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c6078618-2314-44ee-97dc-76a10abffb9d-var-log\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:13 crc kubenswrapper[4814]: I0122 05:34:13.948870 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbpnk\" (UniqueName: \"kubernetes.io/projected/c6078618-2314-44ee-97dc-76a10abffb9d-kube-api-access-gbpnk\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.025277 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.026593 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.034036 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.034280 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.034436 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.034799 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.034984 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-tbbsq" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.051321 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbpnk\" (UniqueName: \"kubernetes.io/projected/c6078618-2314-44ee-97dc-76a10abffb9d-kube-api-access-gbpnk\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.051373 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9c3c821-607f-4f2d-8b28-ae58bce1864d-scripts\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.051402 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c6078618-2314-44ee-97dc-76a10abffb9d-etc-ovs\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.051420 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84ffj\" (UniqueName: \"kubernetes.io/projected/c9c3c821-607f-4f2d-8b28-ae58bce1864d-kube-api-access-84ffj\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.051435 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c6078618-2314-44ee-97dc-76a10abffb9d-var-lib\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.051459 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9c3c821-607f-4f2d-8b28-ae58bce1864d-ovn-controller-tls-certs\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.051474 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c9c3c821-607f-4f2d-8b28-ae58bce1864d-var-run-ovn\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 
22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.051489 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9c3c821-607f-4f2d-8b28-ae58bce1864d-combined-ca-bundle\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.051505 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c9c3c821-607f-4f2d-8b28-ae58bce1864d-var-log-ovn\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.051520 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c6078618-2314-44ee-97dc-76a10abffb9d-scripts\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.051536 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c9c3c821-607f-4f2d-8b28-ae58bce1864d-var-run\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.051568 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c6078618-2314-44ee-97dc-76a10abffb9d-var-run\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.053923 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c9c3c821-607f-4f2d-8b28-ae58bce1864d-var-log-ovn\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.054093 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c9c3c821-607f-4f2d-8b28-ae58bce1864d-var-run-ovn\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.056508 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c6078618-2314-44ee-97dc-76a10abffb9d-var-log\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.056764 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c6078618-2314-44ee-97dc-76a10abffb9d-var-log\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.058521 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.062002 4814 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c9c3c821-607f-4f2d-8b28-ae58bce1864d-var-run\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.062436 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c6078618-2314-44ee-97dc-76a10abffb9d-var-run\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.063215 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c6078618-2314-44ee-97dc-76a10abffb9d-etc-ovs\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.066054 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9c3c821-607f-4f2d-8b28-ae58bce1864d-scripts\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.070885 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c6078618-2314-44ee-97dc-76a10abffb9d-var-lib\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.071818 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9c3c821-607f-4f2d-8b28-ae58bce1864d-ovn-controller-tls-certs\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.072987 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c6078618-2314-44ee-97dc-76a10abffb9d-scripts\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.105965 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9c3c821-607f-4f2d-8b28-ae58bce1864d-combined-ca-bundle\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.111621 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84ffj\" (UniqueName: \"kubernetes.io/projected/c9c3c821-607f-4f2d-8b28-ae58bce1864d-kube-api-access-84ffj\") pod \"ovn-controller-5ll9n\" (UID: \"c9c3c821-607f-4f2d-8b28-ae58bce1864d\") " pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.113847 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbpnk\" (UniqueName: \"kubernetes.io/projected/c6078618-2314-44ee-97dc-76a10abffb9d-kube-api-access-gbpnk\") pod \"ovn-controller-ovs-l9d6w\" (UID: \"c6078618-2314-44ee-97dc-76a10abffb9d\") " pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc 
kubenswrapper[4814]: I0122 05:34:14.137133 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.168841 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4df0b967-80be-424c-b802-ed5393c1c9a6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.168885 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8525t\" (UniqueName: \"kubernetes.io/projected/4df0b967-80be-424c-b802-ed5393c1c9a6-kube-api-access-8525t\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.168928 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.170664 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4df0b967-80be-424c-b802-ed5393c1c9a6-config\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.170704 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4df0b967-80be-424c-b802-ed5393c1c9a6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.170735 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4df0b967-80be-424c-b802-ed5393c1c9a6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.170763 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4df0b967-80be-424c-b802-ed5393c1c9a6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.170784 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4df0b967-80be-424c-b802-ed5393c1c9a6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.195049 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.271758 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4df0b967-80be-424c-b802-ed5393c1c9a6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.271824 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4df0b967-80be-424c-b802-ed5393c1c9a6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.271876 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4df0b967-80be-424c-b802-ed5393c1c9a6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.271898 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8525t\" (UniqueName: \"kubernetes.io/projected/4df0b967-80be-424c-b802-ed5393c1c9a6-kube-api-access-8525t\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.271935 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.271965 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4df0b967-80be-424c-b802-ed5393c1c9a6-config\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.271985 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4df0b967-80be-424c-b802-ed5393c1c9a6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.272022 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4df0b967-80be-424c-b802-ed5393c1c9a6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.272659 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.272930 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/4df0b967-80be-424c-b802-ed5393c1c9a6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.272929 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4df0b967-80be-424c-b802-ed5393c1c9a6-config\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.283471 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4df0b967-80be-424c-b802-ed5393c1c9a6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.283649 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4df0b967-80be-424c-b802-ed5393c1c9a6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.306009 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4df0b967-80be-424c-b802-ed5393c1c9a6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.535948 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4df0b967-80be-424c-b802-ed5393c1c9a6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.550634 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8525t\" (UniqueName: \"kubernetes.io/projected/4df0b967-80be-424c-b802-ed5393c1c9a6-kube-api-access-8525t\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.563070 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4df0b967-80be-424c-b802-ed5393c1c9a6\") " pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:14 crc kubenswrapper[4814]: I0122 05:34:14.769699 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.531547 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.532893 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.543283 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-tj226" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.543337 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.543374 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.543838 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.549686 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.642475 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76c22072-97c7-4207-aa52-94e95694550c-config\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.642533 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/76c22072-97c7-4207-aa52-94e95694550c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.642564 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76c22072-97c7-4207-aa52-94e95694550c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.642588 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95tnf\" (UniqueName: \"kubernetes.io/projected/76c22072-97c7-4207-aa52-94e95694550c-kube-api-access-95tnf\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.642607 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.642680 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/76c22072-97c7-4207-aa52-94e95694550c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.642697 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76c22072-97c7-4207-aa52-94e95694550c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " 
pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.642724 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/76c22072-97c7-4207-aa52-94e95694550c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.743504 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76c22072-97c7-4207-aa52-94e95694550c-config\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.743838 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/76c22072-97c7-4207-aa52-94e95694550c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.743928 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76c22072-97c7-4207-aa52-94e95694550c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.743997 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95tnf\" (UniqueName: \"kubernetes.io/projected/76c22072-97c7-4207-aa52-94e95694550c-kube-api-access-95tnf\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.744061 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.744157 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/76c22072-97c7-4207-aa52-94e95694550c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.744230 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76c22072-97c7-4207-aa52-94e95694550c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.744306 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/76c22072-97c7-4207-aa52-94e95694550c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.747276 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/76c22072-97c7-4207-aa52-94e95694550c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.747423 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.749658 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/76c22072-97c7-4207-aa52-94e95694550c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.750222 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76c22072-97c7-4207-aa52-94e95694550c-config\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.753731 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/76c22072-97c7-4207-aa52-94e95694550c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.761658 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76c22072-97c7-4207-aa52-94e95694550c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.764234 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/76c22072-97c7-4207-aa52-94e95694550c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.767204 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95tnf\" (UniqueName: \"kubernetes.io/projected/76c22072-97c7-4207-aa52-94e95694550c-kube-api-access-95tnf\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.767320 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"76c22072-97c7-4207-aa52-94e95694550c\") " pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:17 crc kubenswrapper[4814]: I0122 05:34:17.853319 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Jan 22 05:34:19 crc kubenswrapper[4814]: I0122 05:34:19.614607 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 05:34:19 crc kubenswrapper[4814]: I0122 05:34:19.615141 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 05:34:32 crc kubenswrapper[4814]: E0122 05:34:32.703024 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified"
Jan 22 05:34:32 crc kubenswrapper[4814]: E0122 05:34:32.703623 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n5bchd9h5f8h59bh59bh8dh596h545h59dh566h87h5fchf6h588h589h547h5c6h556h5f5h5ddhbbh59fh649h79hc6h58ch85h5cdh56ch598hd8h64bq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l95bq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(d42acabc-610a-444e-b480-ae5967f80f67): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 22 05:34:32 crc kubenswrapper[4814]: E0122 05:34:32.704814 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="d42acabc-610a-444e-b480-ae5967f80f67"
Jan 22 05:34:32 crc kubenswrapper[4814]: E0122 05:34:32.967434 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="d42acabc-610a-444e-b480-ae5967f80f67"
Jan 22 05:34:34 crc kubenswrapper[4814]: E0122 05:34:34.812553 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified"
Jan 22 05:34:34 crc kubenswrapper[4814]: E0122 05:34:34.812716 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8wtt5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(575e010d-db55-494f-8f39-c492c2bb22c8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 22 05:34:34 crc kubenswrapper[4814]: E0122 05:34:34.813895 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="575e010d-db55-494f-8f39-c492c2bb22c8"
Jan 22 05:34:34 crc kubenswrapper[4814]: E0122 05:34:34.978409 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="575e010d-db55-494f-8f39-c492c2bb22c8"
Jan 22 05:34:35 crc kubenswrapper[4814]: E0122 05:34:35.782803 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified"
Jan 22 05:34:35 crc kubenswrapper[4814]: E0122 05:34:35.784703 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins 
/operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-996wr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(22c14c36-2eb5-424d-a919-25f2e99eeb44): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:34:35 crc kubenswrapper[4814]: E0122 05:34:35.786462 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="22c14c36-2eb5-424d-a919-25f2e99eeb44" Jan 22 05:34:35 crc kubenswrapper[4814]: E0122 05:34:35.799202 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Jan 22 05:34:35 crc kubenswrapper[4814]: E0122 05:34:35.799358 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ljw2f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(cb12b27a-d0b6-4f39-8795-9001d89527c1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:34:35 crc kubenswrapper[4814]: E0122 05:34:35.800608 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="cb12b27a-d0b6-4f39-8795-9001d89527c1" Jan 22 05:34:35 crc kubenswrapper[4814]: E0122 05:34:35.807364 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Jan 22 05:34:35 crc kubenswrapper[4814]: E0122 05:34:35.807523 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: 
{{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jcmmw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(14a83f70-2b64-417d-a198-d51bb829cea1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:34:35 crc kubenswrapper[4814]: E0122 05:34:35.808875 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="14a83f70-2b64-417d-a198-d51bb829cea1" Jan 22 05:34:35 crc kubenswrapper[4814]: E0122 05:34:35.986824 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="cb12b27a-d0b6-4f39-8795-9001d89527c1" Jan 22 05:34:35 crc kubenswrapper[4814]: E0122 05:34:35.986836 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="14a83f70-2b64-417d-a198-d51bb829cea1" Jan 22 05:34:35 crc kubenswrapper[4814]: E0122 05:34:35.986847 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="22c14c36-2eb5-424d-a919-25f2e99eeb44" Jan 22 05:34:36 crc kubenswrapper[4814]: I0122 05:34:36.357313 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 22 05:34:36 crc kubenswrapper[4814]: W0122 05:34:36.616244 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4df0b967_80be_424c_b802_ed5393c1c9a6.slice/crio-79264796a88f25d56a4ae15872e200a9670ed3509e408ccd7353e38e605fdb98 WatchSource:0}: Error finding container 79264796a88f25d56a4ae15872e200a9670ed3509e408ccd7353e38e605fdb98: Status 404 returned error can't find the container with id 79264796a88f25d56a4ae15872e200a9670ed3509e408ccd7353e38e605fdb98 Jan 22 05:34:36 crc kubenswrapper[4814]: E0122 05:34:36.629301 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 22 05:34:36 crc kubenswrapper[4814]: E0122 05:34:36.629655 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wnxrh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-mw4fc_openstack(a7681e0d-3df9-4710-9a4a-17c3e25dd1a3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 
05:34:36 crc kubenswrapper[4814]: E0122 05:34:36.630900 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" podUID="a7681e0d-3df9-4710-9a4a-17c3e25dd1a3" Jan 22 05:34:36 crc kubenswrapper[4814]: E0122 05:34:36.652503 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 22 05:34:36 crc kubenswrapper[4814]: E0122 05:34:36.652630 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8nvsq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-grh2t_openstack(8c0804d9-4deb-4e4a-a30b-22babff055b2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:34:36 crc kubenswrapper[4814]: E0122 05:34:36.653993 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" podUID="8c0804d9-4deb-4e4a-a30b-22babff055b2" Jan 22 05:34:36 crc kubenswrapper[4814]: E0122 05:34:36.712789 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: 
context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 22 05:34:36 crc kubenswrapper[4814]: E0122 05:34:36.712944 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wlsvb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-7vwmk_openstack(637640fe-91d4-446a-92b5-bde8a7dea007): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:34:36 crc kubenswrapper[4814]: E0122 05:34:36.714075 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" podUID="637640fe-91d4-446a-92b5-bde8a7dea007" Jan 22 05:34:36 crc kubenswrapper[4814]: E0122 05:34:36.741840 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 22 05:34:36 crc kubenswrapper[4814]: E0122 05:34:36.742714 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces 
--listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8hs8j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-vcxc7_openstack(1b95ccce-6e2f-48be-8ff4-7a107496b6f4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:34:36 crc kubenswrapper[4814]: E0122 05:34:36.744074 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-vcxc7" podUID="1b95ccce-6e2f-48be-8ff4-7a107496b6f4" Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.000195 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4df0b967-80be-424c-b802-ed5393c1c9a6","Type":"ContainerStarted","Data":"79264796a88f25d56a4ae15872e200a9670ed3509e408ccd7353e38e605fdb98"} Jan 22 05:34:37 crc kubenswrapper[4814]: E0122 05:34:37.003091 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" podUID="8c0804d9-4deb-4e4a-a30b-22babff055b2" Jan 22 05:34:37 crc kubenswrapper[4814]: E0122 05:34:37.003282 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" podUID="637640fe-91d4-446a-92b5-bde8a7dea007" Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.135824 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5ll9n"] Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.324138 4814 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.433671 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-l9d6w"] Jan 22 05:34:37 crc kubenswrapper[4814]: W0122 05:34:37.463434 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6078618_2314_44ee_97dc_76a10abffb9d.slice/crio-f69ab29412bc27706422691ed29b3287e3970c87884c9db24f7fee8ba55ceef3 WatchSource:0}: Error finding container f69ab29412bc27706422691ed29b3287e3970c87884c9db24f7fee8ba55ceef3: Status 404 returned error can't find the container with id f69ab29412bc27706422691ed29b3287e3970c87884c9db24f7fee8ba55ceef3 Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.505474 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-vcxc7" Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.700232 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b95ccce-6e2f-48be-8ff4-7a107496b6f4-config\") pod \"1b95ccce-6e2f-48be-8ff4-7a107496b6f4\" (UID: \"1b95ccce-6e2f-48be-8ff4-7a107496b6f4\") " Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.700328 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hs8j\" (UniqueName: \"kubernetes.io/projected/1b95ccce-6e2f-48be-8ff4-7a107496b6f4-kube-api-access-8hs8j\") pod \"1b95ccce-6e2f-48be-8ff4-7a107496b6f4\" (UID: \"1b95ccce-6e2f-48be-8ff4-7a107496b6f4\") " Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.701656 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b95ccce-6e2f-48be-8ff4-7a107496b6f4-config" (OuterVolumeSpecName: "config") pod "1b95ccce-6e2f-48be-8ff4-7a107496b6f4" (UID: "1b95ccce-6e2f-48be-8ff4-7a107496b6f4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.708833 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b95ccce-6e2f-48be-8ff4-7a107496b6f4-kube-api-access-8hs8j" (OuterVolumeSpecName: "kube-api-access-8hs8j") pod "1b95ccce-6e2f-48be-8ff4-7a107496b6f4" (UID: "1b95ccce-6e2f-48be-8ff4-7a107496b6f4"). InnerVolumeSpecName "kube-api-access-8hs8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.710591 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.801756 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hs8j\" (UniqueName: \"kubernetes.io/projected/1b95ccce-6e2f-48be-8ff4-7a107496b6f4-kube-api-access-8hs8j\") on node \"crc\" DevicePath \"\"" Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.801790 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b95ccce-6e2f-48be-8ff4-7a107496b6f4-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.902924 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnxrh\" (UniqueName: \"kubernetes.io/projected/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-kube-api-access-wnxrh\") pod \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\" (UID: \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\") " Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.902989 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-dns-svc\") pod \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\" (UID: \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\") " Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.903110 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-config\") pod \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\" (UID: \"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3\") " Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.903601 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a7681e0d-3df9-4710-9a4a-17c3e25dd1a3" (UID: "a7681e0d-3df9-4710-9a4a-17c3e25dd1a3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.903683 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-config" (OuterVolumeSpecName: "config") pod "a7681e0d-3df9-4710-9a4a-17c3e25dd1a3" (UID: "a7681e0d-3df9-4710-9a4a-17c3e25dd1a3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:34:37 crc kubenswrapper[4814]: I0122 05:34:37.906257 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-kube-api-access-wnxrh" (OuterVolumeSpecName: "kube-api-access-wnxrh") pod "a7681e0d-3df9-4710-9a4a-17c3e25dd1a3" (UID: "a7681e0d-3df9-4710-9a4a-17c3e25dd1a3"). InnerVolumeSpecName "kube-api-access-wnxrh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.005375 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.005400 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnxrh\" (UniqueName: \"kubernetes.io/projected/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-kube-api-access-wnxrh\") on node \"crc\" DevicePath \"\"" Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.005411 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.007574 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-l9d6w" event={"ID":"c6078618-2314-44ee-97dc-76a10abffb9d","Type":"ContainerStarted","Data":"f69ab29412bc27706422691ed29b3287e3970c87884c9db24f7fee8ba55ceef3"} Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.008423 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" event={"ID":"a7681e0d-3df9-4710-9a4a-17c3e25dd1a3","Type":"ContainerDied","Data":"511b515a6c3d9ac18f659ccba4ec1866a3219268c0d3de575e3db0f781bf8902"} Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.008485 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-mw4fc" Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.012296 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"76c22072-97c7-4207-aa52-94e95694550c","Type":"ContainerStarted","Data":"3100690303c72c3bc20d491028c3ed32230ac84b197500b403a1600c932e19e5"} Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.013705 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-vcxc7" event={"ID":"1b95ccce-6e2f-48be-8ff4-7a107496b6f4","Type":"ContainerDied","Data":"c39d65baf7cdb09aae7e468aaa087ce95f7f7e38c4f953fa1d431f1431af606a"} Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.013724 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-vcxc7" Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.014815 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5ll9n" event={"ID":"c9c3c821-607f-4f2d-8b28-ae58bce1864d","Type":"ContainerStarted","Data":"bec725abf7d31ff099f9cbe898bd7d57c73f5eec9995a7a91cb686fdd911b280"} Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.141223 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-mw4fc"] Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.146199 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-mw4fc"] Jan 22 05:34:38 crc kubenswrapper[4814]: E0122 05:34:38.160997 4814 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7681e0d_3df9_4710_9a4a_17c3e25dd1a3.slice/crio-511b515a6c3d9ac18f659ccba4ec1866a3219268c0d3de575e3db0f781bf8902\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7681e0d_3df9_4710_9a4a_17c3e25dd1a3.slice\": RecentStats: unable to find data in memory cache]" Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.167965 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-vcxc7"] Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.175645 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-vcxc7"] Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.356073 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b95ccce-6e2f-48be-8ff4-7a107496b6f4" path="/var/lib/kubelet/pods/1b95ccce-6e2f-48be-8ff4-7a107496b6f4/volumes" Jan 22 05:34:38 crc kubenswrapper[4814]: I0122 05:34:38.356517 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7681e0d-3df9-4710-9a4a-17c3e25dd1a3" path="/var/lib/kubelet/pods/a7681e0d-3df9-4710-9a4a-17c3e25dd1a3/volumes" Jan 22 05:34:39 crc kubenswrapper[4814]: I0122 05:34:39.024059 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4ab6f947-7aad-4ca5-98d2-0803c62ed26d","Type":"ContainerStarted","Data":"773247c1a08f32153bc6b4e91d1f81b803bd9cfa5a8c8a5aaf5e9b66ad1e1193"} Jan 22 05:34:39 crc kubenswrapper[4814]: I0122 05:34:39.024274 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 22 05:34:39 crc kubenswrapper[4814]: I0122 05:34:39.044937 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.122458011 podStartE2EDuration="29.044920446s" podCreationTimestamp="2026-01-22 05:34:10 +0000 UTC" firstStartedPulling="2026-01-22 05:34:11.672591146 +0000 UTC m=+937.756079361" lastFinishedPulling="2026-01-22 05:34:38.595053581 +0000 UTC m=+964.678541796" observedRunningTime="2026-01-22 05:34:39.039036304 +0000 UTC m=+965.122524519" watchObservedRunningTime="2026-01-22 05:34:39.044920446 +0000 UTC m=+965.128408661" Jan 22 05:34:42 crc kubenswrapper[4814]: I0122 05:34:42.054194 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"76c22072-97c7-4207-aa52-94e95694550c","Type":"ContainerStarted","Data":"dab7a65e8d6817f04c0d4f860a9738d0e60baf9599447e7f3ee459c1486e9632"} Jan 22 05:34:42 crc kubenswrapper[4814]: I0122 
05:34:42.057989 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5ll9n" event={"ID":"c9c3c821-607f-4f2d-8b28-ae58bce1864d","Type":"ContainerStarted","Data":"4fe824a1fc24c0623c96fb0082949fa6d9005c6118fd60f6c81165b4b68250b0"} Jan 22 05:34:42 crc kubenswrapper[4814]: I0122 05:34:42.058359 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-5ll9n" Jan 22 05:34:42 crc kubenswrapper[4814]: I0122 05:34:42.061393 4814 generic.go:334] "Generic (PLEG): container finished" podID="c6078618-2314-44ee-97dc-76a10abffb9d" containerID="734a6704babb6ca53c76090345be0ee222f85ca134eb06277b58a17fcd787ba7" exitCode=0 Jan 22 05:34:42 crc kubenswrapper[4814]: I0122 05:34:42.061509 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-l9d6w" event={"ID":"c6078618-2314-44ee-97dc-76a10abffb9d","Type":"ContainerDied","Data":"734a6704babb6ca53c76090345be0ee222f85ca134eb06277b58a17fcd787ba7"} Jan 22 05:34:42 crc kubenswrapper[4814]: I0122 05:34:42.065814 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4df0b967-80be-424c-b802-ed5393c1c9a6","Type":"ContainerStarted","Data":"30d13ce4014c708de0dee878dcce40c3525166242e94554ecdbe766b38021714"} Jan 22 05:34:42 crc kubenswrapper[4814]: I0122 05:34:42.144308 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-5ll9n" podStartSLOduration=25.559010744 podStartE2EDuration="29.144289786s" podCreationTimestamp="2026-01-22 05:34:13 +0000 UTC" firstStartedPulling="2026-01-22 05:34:37.467194163 +0000 UTC m=+963.550682378" lastFinishedPulling="2026-01-22 05:34:41.052473195 +0000 UTC m=+967.135961420" observedRunningTime="2026-01-22 05:34:42.109055955 +0000 UTC m=+968.192544170" watchObservedRunningTime="2026-01-22 05:34:42.144289786 +0000 UTC m=+968.227778001" Jan 22 05:34:43 crc kubenswrapper[4814]: I0122 05:34:43.089384 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-l9d6w" event={"ID":"c6078618-2314-44ee-97dc-76a10abffb9d","Type":"ContainerStarted","Data":"a7aabca146325e0ee30d947fbec876a423ace889b0a0b9c44350385b31fb2726"} Jan 22 05:34:43 crc kubenswrapper[4814]: I0122 05:34:43.089824 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-l9d6w" event={"ID":"c6078618-2314-44ee-97dc-76a10abffb9d","Type":"ContainerStarted","Data":"503cac0209f1986a6111c104e8e6bf281fcccbbb7bdaccdc3b896a18577f6949"} Jan 22 05:34:43 crc kubenswrapper[4814]: I0122 05:34:43.089864 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:43 crc kubenswrapper[4814]: I0122 05:34:43.089895 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:34:43 crc kubenswrapper[4814]: I0122 05:34:43.116976 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-l9d6w" podStartSLOduration=26.541412257 podStartE2EDuration="30.116960837s" podCreationTimestamp="2026-01-22 05:34:13 +0000 UTC" firstStartedPulling="2026-01-22 05:34:37.47161184 +0000 UTC m=+963.555100055" lastFinishedPulling="2026-01-22 05:34:41.04716041 +0000 UTC m=+967.130648635" observedRunningTime="2026-01-22 05:34:43.114620444 +0000 UTC m=+969.198108659" watchObservedRunningTime="2026-01-22 05:34:43.116960837 +0000 UTC m=+969.200449052" Jan 22 05:34:45 crc kubenswrapper[4814]: I0122 
05:34:45.106543 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4df0b967-80be-424c-b802-ed5393c1c9a6","Type":"ContainerStarted","Data":"d482aeb886d503caf31f5d5d4551499d1b9c6b9845de2d87346be688a2db656a"} Jan 22 05:34:45 crc kubenswrapper[4814]: I0122 05:34:45.134379 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=24.889373943 podStartE2EDuration="33.13436217s" podCreationTimestamp="2026-01-22 05:34:12 +0000 UTC" firstStartedPulling="2026-01-22 05:34:36.629957818 +0000 UTC m=+962.713446043" lastFinishedPulling="2026-01-22 05:34:44.874946055 +0000 UTC m=+970.958434270" observedRunningTime="2026-01-22 05:34:45.132660828 +0000 UTC m=+971.216149063" watchObservedRunningTime="2026-01-22 05:34:45.13436217 +0000 UTC m=+971.217850385" Jan 22 05:34:46 crc kubenswrapper[4814]: I0122 05:34:46.123045 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"76c22072-97c7-4207-aa52-94e95694550c","Type":"ContainerStarted","Data":"4372378875febedf0f36cd73fafb76896eb63965540f8573f3a4d9fdc89a67b6"} Jan 22 05:34:46 crc kubenswrapper[4814]: I0122 05:34:46.154269 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=22.740932471 podStartE2EDuration="30.154252414s" podCreationTimestamp="2026-01-22 05:34:16 +0000 UTC" firstStartedPulling="2026-01-22 05:34:37.456206623 +0000 UTC m=+963.539694838" lastFinishedPulling="2026-01-22 05:34:44.869526566 +0000 UTC m=+970.953014781" observedRunningTime="2026-01-22 05:34:46.148445504 +0000 UTC m=+972.231933719" watchObservedRunningTime="2026-01-22 05:34:46.154252414 +0000 UTC m=+972.237740629" Jan 22 05:34:47 crc kubenswrapper[4814]: I0122 05:34:47.149973 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d42acabc-610a-444e-b480-ae5967f80f67","Type":"ContainerStarted","Data":"3e9430273dc09664707303e70f945d2d0a076d01328850e0b6089940bb38020f"} Jan 22 05:34:47 crc kubenswrapper[4814]: I0122 05:34:47.150877 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 22 05:34:47 crc kubenswrapper[4814]: I0122 05:34:47.185710 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.628707976 podStartE2EDuration="39.185676115s" podCreationTimestamp="2026-01-22 05:34:08 +0000 UTC" firstStartedPulling="2026-01-22 05:34:09.436347459 +0000 UTC m=+935.519835674" lastFinishedPulling="2026-01-22 05:34:45.993315598 +0000 UTC m=+972.076803813" observedRunningTime="2026-01-22 05:34:47.183029293 +0000 UTC m=+973.266517548" watchObservedRunningTime="2026-01-22 05:34:47.185676115 +0000 UTC m=+973.269164370" Jan 22 05:34:47 crc kubenswrapper[4814]: I0122 05:34:47.770593 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:47 crc kubenswrapper[4814]: I0122 05:34:47.836498 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:47 crc kubenswrapper[4814]: I0122 05:34:47.855056 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:47 crc kubenswrapper[4814]: I0122 05:34:47.855106 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:47 crc 
kubenswrapper[4814]: I0122 05:34:47.917220 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.157030 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.220540 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.221516 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.432936 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7vwmk"] Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.462663 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-4wjwc"] Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.473842 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.481824 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.519694 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-4wjwc"] Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.525962 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-nj984"] Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.526967 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.531883 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.541254 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-nj984"] Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.577077 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ptpn\" (UniqueName: \"kubernetes.io/projected/5adc7663-af12-4aeb-948a-39c0623a0b08-kube-api-access-7ptpn\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.577154 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-config\") pod \"dnsmasq-dns-7fd796d7df-4wjwc\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.577200 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5adc7663-af12-4aeb-948a-39c0623a0b08-combined-ca-bundle\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.577247 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-4wjwc\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.577299 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5adc7663-af12-4aeb-948a-39c0623a0b08-ovn-rundir\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.577352 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5adc7663-af12-4aeb-948a-39c0623a0b08-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.577371 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5adc7663-af12-4aeb-948a-39c0623a0b08-config\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.577440 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5adc7663-af12-4aeb-948a-39c0623a0b08-ovs-rundir\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.577463 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hfhk\" (UniqueName: \"kubernetes.io/projected/facb7dd2-b0d1-457f-9b21-900a2132b3fd-kube-api-access-6hfhk\") pod \"dnsmasq-dns-7fd796d7df-4wjwc\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.577487 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-4wjwc\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.683235 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-grh2t"] Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.686088 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-4wjwc\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.686139 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5adc7663-af12-4aeb-948a-39c0623a0b08-ovn-rundir\") pod \"ovn-controller-metrics-nj984\" (UID: 
\"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.686179 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5adc7663-af12-4aeb-948a-39c0623a0b08-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.686200 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5adc7663-af12-4aeb-948a-39c0623a0b08-config\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.686231 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5adc7663-af12-4aeb-948a-39c0623a0b08-ovs-rundir\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.686249 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hfhk\" (UniqueName: \"kubernetes.io/projected/facb7dd2-b0d1-457f-9b21-900a2132b3fd-kube-api-access-6hfhk\") pod \"dnsmasq-dns-7fd796d7df-4wjwc\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.686273 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-4wjwc\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.686302 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ptpn\" (UniqueName: \"kubernetes.io/projected/5adc7663-af12-4aeb-948a-39c0623a0b08-kube-api-access-7ptpn\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.686333 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-config\") pod \"dnsmasq-dns-7fd796d7df-4wjwc\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.686356 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5adc7663-af12-4aeb-948a-39c0623a0b08-combined-ca-bundle\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.687222 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5adc7663-af12-4aeb-948a-39c0623a0b08-ovs-rundir\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " 
pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.687604 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5adc7663-af12-4aeb-948a-39c0623a0b08-config\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.687971 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-4wjwc\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.688022 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5adc7663-af12-4aeb-948a-39c0623a0b08-ovn-rundir\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.688185 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-4wjwc\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.688510 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-config\") pod \"dnsmasq-dns-7fd796d7df-4wjwc\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.695814 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5adc7663-af12-4aeb-948a-39c0623a0b08-combined-ca-bundle\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.701119 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5adc7663-af12-4aeb-948a-39c0623a0b08-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.731310 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hfhk\" (UniqueName: \"kubernetes.io/projected/facb7dd2-b0d1-457f-9b21-900a2132b3fd-kube-api-access-6hfhk\") pod \"dnsmasq-dns-7fd796d7df-4wjwc\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.738308 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ptpn\" (UniqueName: \"kubernetes.io/projected/5adc7663-af12-4aeb-948a-39c0623a0b08-kube-api-access-7ptpn\") pod \"ovn-controller-metrics-nj984\" (UID: \"5adc7663-af12-4aeb-948a-39c0623a0b08\") " pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.748273 4814 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.749495 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.762222 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.771576 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.771751 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-9p8s6" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.771870 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.780427 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.786872 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f85de91c-1548-4c1b-810b-9998c8331bb6-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.787090 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85de91c-1548-4c1b-810b-9998c8331bb6-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.787166 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f85de91c-1548-4c1b-810b-9998c8331bb6-config\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.787236 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f85de91c-1548-4c1b-810b-9998c8331bb6-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.787515 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-2jsgn"] Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.797987 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.798433 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f85de91c-1548-4c1b-810b-9998c8331bb6-scripts\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.798484 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pw2l7\" (UniqueName: \"kubernetes.io/projected/f85de91c-1548-4c1b-810b-9998c8331bb6-kube-api-access-pw2l7\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.798511 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85de91c-1548-4c1b-810b-9998c8331bb6-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.808776 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.821051 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.826147 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-2jsgn"] Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.858367 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-nj984" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.903809 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f85de91c-1548-4c1b-810b-9998c8331bb6-config\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.904030 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f85de91c-1548-4c1b-810b-9998c8331bb6-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.904140 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.904215 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8srwl\" (UniqueName: \"kubernetes.io/projected/d604dc72-ca2a-4ef5-bed1-07a12ce10183-kube-api-access-8srwl\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.904295 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f85de91c-1548-4c1b-810b-9998c8331bb6-scripts\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.904403 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pw2l7\" (UniqueName: \"kubernetes.io/projected/f85de91c-1548-4c1b-810b-9998c8331bb6-kube-api-access-pw2l7\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.904488 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85de91c-1548-4c1b-810b-9998c8331bb6-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.904595 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-config\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.904689 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f85de91c-1548-4c1b-810b-9998c8331bb6-config\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.904847 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/f85de91c-1548-4c1b-810b-9998c8331bb6-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.904929 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.905117 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.905494 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85de91c-1548-4c1b-810b-9998c8331bb6-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.904428 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f85de91c-1548-4c1b-810b-9998c8331bb6-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.906042 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f85de91c-1548-4c1b-810b-9998c8331bb6-scripts\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.909268 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85de91c-1548-4c1b-810b-9998c8331bb6-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.913254 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f85de91c-1548-4c1b-810b-9998c8331bb6-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.923504 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pw2l7\" (UniqueName: \"kubernetes.io/projected/f85de91c-1548-4c1b-810b-9998c8331bb6-kube-api-access-pw2l7\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:48 crc kubenswrapper[4814]: I0122 05:34:48.935171 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f85de91c-1548-4c1b-810b-9998c8331bb6-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f85de91c-1548-4c1b-810b-9998c8331bb6\") " pod="openstack/ovn-northd-0" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.011498 
4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.011693 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8srwl\" (UniqueName: \"kubernetes.io/projected/d604dc72-ca2a-4ef5-bed1-07a12ce10183-kube-api-access-8srwl\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.011744 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-config\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.011787 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.011808 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.012458 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.012471 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.012823 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-config\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.013035 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.017684 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.041968 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8srwl\" (UniqueName: \"kubernetes.io/projected/d604dc72-ca2a-4ef5-bed1-07a12ce10183-kube-api-access-8srwl\") pod \"dnsmasq-dns-86db49b7ff-2jsgn\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.085366 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.116168 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlsvb\" (UniqueName: \"kubernetes.io/projected/637640fe-91d4-446a-92b5-bde8a7dea007-kube-api-access-wlsvb\") pod \"637640fe-91d4-446a-92b5-bde8a7dea007\" (UID: \"637640fe-91d4-446a-92b5-bde8a7dea007\") " Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.116237 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/637640fe-91d4-446a-92b5-bde8a7dea007-config\") pod \"637640fe-91d4-446a-92b5-bde8a7dea007\" (UID: \"637640fe-91d4-446a-92b5-bde8a7dea007\") " Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.116400 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/637640fe-91d4-446a-92b5-bde8a7dea007-dns-svc\") pod \"637640fe-91d4-446a-92b5-bde8a7dea007\" (UID: \"637640fe-91d4-446a-92b5-bde8a7dea007\") " Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.117022 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/637640fe-91d4-446a-92b5-bde8a7dea007-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "637640fe-91d4-446a-92b5-bde8a7dea007" (UID: "637640fe-91d4-446a-92b5-bde8a7dea007"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.117098 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/637640fe-91d4-446a-92b5-bde8a7dea007-config" (OuterVolumeSpecName: "config") pod "637640fe-91d4-446a-92b5-bde8a7dea007" (UID: "637640fe-91d4-446a-92b5-bde8a7dea007"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.122776 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/637640fe-91d4-446a-92b5-bde8a7dea007-kube-api-access-wlsvb" (OuterVolumeSpecName: "kube-api-access-wlsvb") pod "637640fe-91d4-446a-92b5-bde8a7dea007" (UID: "637640fe-91d4-446a-92b5-bde8a7dea007"). InnerVolumeSpecName "kube-api-access-wlsvb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.136792 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.171004 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"22c14c36-2eb5-424d-a919-25f2e99eeb44","Type":"ContainerStarted","Data":"a859b6b6c8244733d3bfd805c35ab89852ef9de42452cfa756a80e1942fce6bc"} Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.181542 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" event={"ID":"637640fe-91d4-446a-92b5-bde8a7dea007","Type":"ContainerDied","Data":"98a0a7ea5a5cc30ae2a75e6f8d0cfabccc2b8106918eeb18e7d6f98b4de8986c"} Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.181687 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-7vwmk" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.183593 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"14a83f70-2b64-417d-a198-d51bb829cea1","Type":"ContainerStarted","Data":"ba70de8318a9434d35facf4c16d1dbd28bb5e77cdb81af5035192ac0f65f5894"} Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.232943 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/637640fe-91d4-446a-92b5-bde8a7dea007-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.232970 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wlsvb\" (UniqueName: \"kubernetes.io/projected/637640fe-91d4-446a-92b5-bde8a7dea007-kube-api-access-wlsvb\") on node \"crc\" DevicePath \"\"" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.232980 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/637640fe-91d4-446a-92b5-bde8a7dea007-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.316453 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.348536 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7vwmk"] Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.358520 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-7vwmk"] Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.434731 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c0804d9-4deb-4e4a-a30b-22babff055b2-config\") pod \"8c0804d9-4deb-4e4a-a30b-22babff055b2\" (UID: \"8c0804d9-4deb-4e4a-a30b-22babff055b2\") " Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.434846 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c0804d9-4deb-4e4a-a30b-22babff055b2-dns-svc\") pod \"8c0804d9-4deb-4e4a-a30b-22babff055b2\" (UID: \"8c0804d9-4deb-4e4a-a30b-22babff055b2\") " Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.434929 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8nvsq\" (UniqueName: \"kubernetes.io/projected/8c0804d9-4deb-4e4a-a30b-22babff055b2-kube-api-access-8nvsq\") pod \"8c0804d9-4deb-4e4a-a30b-22babff055b2\" (UID: \"8c0804d9-4deb-4e4a-a30b-22babff055b2\") " Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.435953 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c0804d9-4deb-4e4a-a30b-22babff055b2-config" (OuterVolumeSpecName: "config") pod "8c0804d9-4deb-4e4a-a30b-22babff055b2" (UID: "8c0804d9-4deb-4e4a-a30b-22babff055b2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.436269 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c0804d9-4deb-4e4a-a30b-22babff055b2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8c0804d9-4deb-4e4a-a30b-22babff055b2" (UID: "8c0804d9-4deb-4e4a-a30b-22babff055b2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.447554 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c0804d9-4deb-4e4a-a30b-22babff055b2-kube-api-access-8nvsq" (OuterVolumeSpecName: "kube-api-access-8nvsq") pod "8c0804d9-4deb-4e4a-a30b-22babff055b2" (UID: "8c0804d9-4deb-4e4a-a30b-22babff055b2"). InnerVolumeSpecName "kube-api-access-8nvsq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.518652 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-4wjwc"] Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.536383 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c0804d9-4deb-4e4a-a30b-22babff055b2-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.536402 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c0804d9-4deb-4e4a-a30b-22babff055b2-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.536411 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8nvsq\" (UniqueName: \"kubernetes.io/projected/8c0804d9-4deb-4e4a-a30b-22babff055b2-kube-api-access-8nvsq\") on node \"crc\" DevicePath \"\"" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.614337 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.614384 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.614420 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.615248 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9500dee208774edd1316e9481891ac3158cca3bdb31ab2aefff48638b4f8e29b"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.615297 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://9500dee208774edd1316e9481891ac3158cca3bdb31ab2aefff48638b4f8e29b" gracePeriod=600 Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.654783 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-nj984"] Jan 22 05:34:49 crc kubenswrapper[4814]: I0122 05:34:49.674294 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.066729 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-2jsgn"] Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.193917 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" 
event={"ID":"f85de91c-1548-4c1b-810b-9998c8331bb6","Type":"ContainerStarted","Data":"6f26772936fc306d0b45ef21104923bd8072184cce7b36896212a573220602d3"} Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.200425 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="9500dee208774edd1316e9481891ac3158cca3bdb31ab2aefff48638b4f8e29b" exitCode=0 Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.200487 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"9500dee208774edd1316e9481891ac3158cca3bdb31ab2aefff48638b4f8e29b"} Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.200515 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"4cc634dfae0a47901cc979ba5b63d3858a39aa8e9b0382a2430471166dd22de7"} Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.200529 4814 scope.go:117] "RemoveContainer" containerID="b60d5a55f7f3e7c7e151368bd532eb06ab5f80edff26a6360b765f6b4951f49e" Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.205298 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" event={"ID":"8c0804d9-4deb-4e4a-a30b-22babff055b2","Type":"ContainerDied","Data":"381f3953b87b4d56677ea401f9cb8a31e99d86f0afbd529324f4188b5464379d"} Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.205399 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-grh2t" Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.207846 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" event={"ID":"facb7dd2-b0d1-457f-9b21-900a2132b3fd","Type":"ContainerStarted","Data":"ea36b16c5bcb29db4397e9db908b36a8c37cd8dca806cf53c4e89ef9e9824119"} Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.214885 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-nj984" event={"ID":"5adc7663-af12-4aeb-948a-39c0623a0b08","Type":"ContainerStarted","Data":"867cb43ac6410b3b0d00a7f4df931f82fe454ae4d2dd7b2f06b8866d3406d965"} Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.215122 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-nj984" event={"ID":"5adc7663-af12-4aeb-948a-39c0623a0b08","Type":"ContainerStarted","Data":"69008d7540755e5feff2b5488360f3cc5c60f97578ec1bf520b5f057c4181fdc"} Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.222093 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"575e010d-db55-494f-8f39-c492c2bb22c8","Type":"ContainerStarted","Data":"842e63a8ae2426ae6dcdaf8989fbdb4ab66d719f435dedddbf668ebf0c7c807e"} Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.226751 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" event={"ID":"d604dc72-ca2a-4ef5-bed1-07a12ce10183","Type":"ContainerStarted","Data":"79dba71644577a0015de4b1da77c019f365ada1216a764204d17827dcbb489f4"} Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.276184 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-nj984" podStartSLOduration=2.276167882 
podStartE2EDuration="2.276167882s" podCreationTimestamp="2026-01-22 05:34:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:34:50.270898549 +0000 UTC m=+976.354386764" watchObservedRunningTime="2026-01-22 05:34:50.276167882 +0000 UTC m=+976.359656097" Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.383691 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="637640fe-91d4-446a-92b5-bde8a7dea007" path="/var/lib/kubelet/pods/637640fe-91d4-446a-92b5-bde8a7dea007/volumes" Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.384002 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-grh2t"] Jan 22 05:34:50 crc kubenswrapper[4814]: I0122 05:34:50.391203 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-grh2t"] Jan 22 05:34:51 crc kubenswrapper[4814]: I0122 05:34:51.003147 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 22 05:34:51 crc kubenswrapper[4814]: I0122 05:34:51.251059 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" event={"ID":"d604dc72-ca2a-4ef5-bed1-07a12ce10183","Type":"ContainerDied","Data":"716d1d10c5f06ff44339e8e2e3908cc1e7b4a5fe151ad239ccf304488afe2a6a"} Jan 22 05:34:51 crc kubenswrapper[4814]: I0122 05:34:51.250524 4814 generic.go:334] "Generic (PLEG): container finished" podID="d604dc72-ca2a-4ef5-bed1-07a12ce10183" containerID="716d1d10c5f06ff44339e8e2e3908cc1e7b4a5fe151ad239ccf304488afe2a6a" exitCode=0 Jan 22 05:34:51 crc kubenswrapper[4814]: I0122 05:34:51.257933 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cb12b27a-d0b6-4f39-8795-9001d89527c1","Type":"ContainerStarted","Data":"76f4f04a3e6cb9e09294ce38eeb019e7c9f0539c62e72dc3e5985aaaee5f4bea"} Jan 22 05:34:51 crc kubenswrapper[4814]: I0122 05:34:51.265100 4814 generic.go:334] "Generic (PLEG): container finished" podID="facb7dd2-b0d1-457f-9b21-900a2132b3fd" containerID="262d3bd3a6fae72e5fb1c70661bede5cb173fae6ef906d7fed2ee94f9cf2692c" exitCode=0 Jan 22 05:34:51 crc kubenswrapper[4814]: I0122 05:34:51.265348 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" event={"ID":"facb7dd2-b0d1-457f-9b21-900a2132b3fd","Type":"ContainerDied","Data":"262d3bd3a6fae72e5fb1c70661bede5cb173fae6ef906d7fed2ee94f9cf2692c"} Jan 22 05:34:52 crc kubenswrapper[4814]: I0122 05:34:52.274039 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" event={"ID":"facb7dd2-b0d1-457f-9b21-900a2132b3fd","Type":"ContainerStarted","Data":"4b6bc12f16f00e0ef83b120206570c912ad93627b3f3b13ad461e93ae06d8183"} Jan 22 05:34:52 crc kubenswrapper[4814]: I0122 05:34:52.274666 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:52 crc kubenswrapper[4814]: I0122 05:34:52.275979 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" event={"ID":"d604dc72-ca2a-4ef5-bed1-07a12ce10183","Type":"ContainerStarted","Data":"8b3d5f23d0ea51d2df9e5d5fad04b1a611af3a27133eaafb4e87e272596986df"} Jan 22 05:34:52 crc kubenswrapper[4814]: I0122 05:34:52.276107 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:52 
crc kubenswrapper[4814]: I0122 05:34:52.278646 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f85de91c-1548-4c1b-810b-9998c8331bb6","Type":"ContainerStarted","Data":"d0fe46e9f24d67f4a71b89d10536e53bdb08ed9be9e1ce7df35e41c52f8c3cc3"} Jan 22 05:34:52 crc kubenswrapper[4814]: I0122 05:34:52.278673 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f85de91c-1548-4c1b-810b-9998c8331bb6","Type":"ContainerStarted","Data":"1912854c6c4baaf6ec83e564f129d8b0b03075ff1ffc3a2a1f4fb657d7ea74c9"} Jan 22 05:34:52 crc kubenswrapper[4814]: I0122 05:34:52.279135 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 22 05:34:52 crc kubenswrapper[4814]: I0122 05:34:52.327400 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" podStartSLOduration=3.877497728 podStartE2EDuration="4.327381871s" podCreationTimestamp="2026-01-22 05:34:48 +0000 UTC" firstStartedPulling="2026-01-22 05:34:49.550995068 +0000 UTC m=+975.634483283" lastFinishedPulling="2026-01-22 05:34:50.000879211 +0000 UTC m=+976.084367426" observedRunningTime="2026-01-22 05:34:52.296129682 +0000 UTC m=+978.379617907" watchObservedRunningTime="2026-01-22 05:34:52.327381871 +0000 UTC m=+978.410870146" Jan 22 05:34:52 crc kubenswrapper[4814]: I0122 05:34:52.341747 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" podStartSLOduration=4.341728455 podStartE2EDuration="4.341728455s" podCreationTimestamp="2026-01-22 05:34:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:34:52.337969529 +0000 UTC m=+978.421457744" watchObservedRunningTime="2026-01-22 05:34:52.341728455 +0000 UTC m=+978.425216670" Jan 22 05:34:52 crc kubenswrapper[4814]: I0122 05:34:52.388581 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.51742841 podStartE2EDuration="4.388562897s" podCreationTimestamp="2026-01-22 05:34:48 +0000 UTC" firstStartedPulling="2026-01-22 05:34:49.656776827 +0000 UTC m=+975.740265032" lastFinishedPulling="2026-01-22 05:34:51.527911304 +0000 UTC m=+977.611399519" observedRunningTime="2026-01-22 05:34:52.378859726 +0000 UTC m=+978.462347941" watchObservedRunningTime="2026-01-22 05:34:52.388562897 +0000 UTC m=+978.472051112" Jan 22 05:34:52 crc kubenswrapper[4814]: I0122 05:34:52.430572 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c0804d9-4deb-4e4a-a30b-22babff055b2" path="/var/lib/kubelet/pods/8c0804d9-4deb-4e4a-a30b-22babff055b2/volumes" Jan 22 05:34:53 crc kubenswrapper[4814]: I0122 05:34:53.651807 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 22 05:34:54 crc kubenswrapper[4814]: I0122 05:34:54.293776 4814 generic.go:334] "Generic (PLEG): container finished" podID="575e010d-db55-494f-8f39-c492c2bb22c8" containerID="842e63a8ae2426ae6dcdaf8989fbdb4ab66d719f435dedddbf668ebf0c7c807e" exitCode=0 Jan 22 05:34:54 crc kubenswrapper[4814]: I0122 05:34:54.293862 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"575e010d-db55-494f-8f39-c492c2bb22c8","Type":"ContainerDied","Data":"842e63a8ae2426ae6dcdaf8989fbdb4ab66d719f435dedddbf668ebf0c7c807e"} Jan 22 05:34:55 crc kubenswrapper[4814]: 
I0122 05:34:55.309392 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"575e010d-db55-494f-8f39-c492c2bb22c8","Type":"ContainerStarted","Data":"544f702278f5bbbc1b518d256e84b5aa5feb609a681f1948eb5c31824aadcefe"} Jan 22 05:34:55 crc kubenswrapper[4814]: I0122 05:34:55.313241 4814 generic.go:334] "Generic (PLEG): container finished" podID="cb12b27a-d0b6-4f39-8795-9001d89527c1" containerID="76f4f04a3e6cb9e09294ce38eeb019e7c9f0539c62e72dc3e5985aaaee5f4bea" exitCode=0 Jan 22 05:34:55 crc kubenswrapper[4814]: I0122 05:34:55.313287 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cb12b27a-d0b6-4f39-8795-9001d89527c1","Type":"ContainerDied","Data":"76f4f04a3e6cb9e09294ce38eeb019e7c9f0539c62e72dc3e5985aaaee5f4bea"} Jan 22 05:34:55 crc kubenswrapper[4814]: I0122 05:34:55.339027 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=8.308049469 podStartE2EDuration="50.339010284s" podCreationTimestamp="2026-01-22 05:34:05 +0000 UTC" firstStartedPulling="2026-01-22 05:34:07.828326667 +0000 UTC m=+933.911814882" lastFinishedPulling="2026-01-22 05:34:49.859287482 +0000 UTC m=+975.942775697" observedRunningTime="2026-01-22 05:34:55.335051111 +0000 UTC m=+981.418539336" watchObservedRunningTime="2026-01-22 05:34:55.339010284 +0000 UTC m=+981.422498499" Jan 22 05:34:56 crc kubenswrapper[4814]: I0122 05:34:56.326548 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"cb12b27a-d0b6-4f39-8795-9001d89527c1","Type":"ContainerStarted","Data":"7ee57eca066c9d7df746df8eb1508e28bfd575cab339bd5ac0f31f26edfe9a80"} Jan 22 05:34:56 crc kubenswrapper[4814]: I0122 05:34:56.379852 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371987.474949 podStartE2EDuration="49.379826739s" podCreationTimestamp="2026-01-22 05:34:07 +0000 UTC" firstStartedPulling="2026-01-22 05:34:09.41159596 +0000 UTC m=+935.495084176" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:34:56.370093348 +0000 UTC m=+982.453581643" watchObservedRunningTime="2026-01-22 05:34:56.379826739 +0000 UTC m=+982.463314974" Jan 22 05:34:57 crc kubenswrapper[4814]: I0122 05:34:57.310089 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 22 05:34:57 crc kubenswrapper[4814]: I0122 05:34:57.310134 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 22 05:34:58 crc kubenswrapper[4814]: I0122 05:34:58.666070 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:58 crc kubenswrapper[4814]: I0122 05:34:58.667312 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 22 05:34:58 crc kubenswrapper[4814]: I0122 05:34:58.800717 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:34:59 crc kubenswrapper[4814]: I0122 05:34:59.139808 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:34:59 crc kubenswrapper[4814]: I0122 05:34:59.215134 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-7fd796d7df-4wjwc"] Jan 22 05:34:59 crc kubenswrapper[4814]: I0122 05:34:59.349049 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" podUID="facb7dd2-b0d1-457f-9b21-900a2132b3fd" containerName="dnsmasq-dns" containerID="cri-o://4b6bc12f16f00e0ef83b120206570c912ad93627b3f3b13ad461e93ae06d8183" gracePeriod=10 Jan 22 05:35:00 crc kubenswrapper[4814]: I0122 05:35:00.355424 4814 generic.go:334] "Generic (PLEG): container finished" podID="facb7dd2-b0d1-457f-9b21-900a2132b3fd" containerID="4b6bc12f16f00e0ef83b120206570c912ad93627b3f3b13ad461e93ae06d8183" exitCode=0 Jan 22 05:35:00 crc kubenswrapper[4814]: I0122 05:35:00.355497 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" event={"ID":"facb7dd2-b0d1-457f-9b21-900a2132b3fd","Type":"ContainerDied","Data":"4b6bc12f16f00e0ef83b120206570c912ad93627b3f3b13ad461e93ae06d8183"} Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.183104 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-tkzhv"] Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.184477 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.225653 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-tkzhv"] Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.252890 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.252932 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-config\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.252961 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.253028 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkzxh\" (UniqueName: \"kubernetes.io/projected/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-kube-api-access-bkzxh\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.253082 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-dns-svc\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 
05:35:01.354547 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-dns-svc\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.354621 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.354655 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-config\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.354676 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.354776 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bkzxh\" (UniqueName: \"kubernetes.io/projected/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-kube-api-access-bkzxh\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.356150 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-dns-svc\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.356281 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.356932 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.357395 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-config\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.404446 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkzxh\" (UniqueName: 
\"kubernetes.io/projected/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-kube-api-access-bkzxh\") pod \"dnsmasq-dns-698758b865-tkzhv\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.499180 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:01 crc kubenswrapper[4814]: I0122 05:35:01.993055 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-tkzhv"] Jan 22 05:35:01 crc kubenswrapper[4814]: W0122 05:35:01.999790 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda0c9ee0b_dd3a_4bd0_abc7_162fdc023f33.slice/crio-7794a3572cb345dbd00530047c42b03588e32254130b496d3628695fc4871686 WatchSource:0}: Error finding container 7794a3572cb345dbd00530047c42b03588e32254130b496d3628695fc4871686: Status 404 returned error can't find the container with id 7794a3572cb345dbd00530047c42b03588e32254130b496d3628695fc4871686 Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.379321 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-tkzhv" event={"ID":"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33","Type":"ContainerStarted","Data":"7794a3572cb345dbd00530047c42b03588e32254130b496d3628695fc4871686"} Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.544097 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.548608 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.552097 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.552330 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.552451 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-sf8p6" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.552572 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.627073 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.674349 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.674424 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/e7eb7182-d869-4625-99e2-6abc75aee22d-lock\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.674451 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/e7eb7182-d869-4625-99e2-6abc75aee22d-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.674526 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhzp5\" (UniqueName: \"kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-kube-api-access-qhzp5\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.674554 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.674637 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/e7eb7182-d869-4625-99e2-6abc75aee22d-cache\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.776217 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhzp5\" (UniqueName: \"kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-kube-api-access-qhzp5\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.776280 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.776364 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/e7eb7182-d869-4625-99e2-6abc75aee22d-cache\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.776443 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.776534 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/e7eb7182-d869-4625-99e2-6abc75aee22d-lock\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.776566 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7eb7182-d869-4625-99e2-6abc75aee22d-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: E0122 05:35:02.777910 4814 projected.go:288] Couldn't get configMap 
openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 05:35:02 crc kubenswrapper[4814]: E0122 05:35:02.777939 4814 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 05:35:02 crc kubenswrapper[4814]: E0122 05:35:02.777994 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift podName:e7eb7182-d869-4625-99e2-6abc75aee22d nodeName:}" failed. No retries permitted until 2026-01-22 05:35:03.277973914 +0000 UTC m=+989.361462129 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift") pod "swift-storage-0" (UID: "e7eb7182-d869-4625-99e2-6abc75aee22d") : configmap "swift-ring-files" not found Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.778261 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/e7eb7182-d869-4625-99e2-6abc75aee22d-cache\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.778275 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.778556 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/e7eb7182-d869-4625-99e2-6abc75aee22d-lock\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.782385 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7eb7182-d869-4625-99e2-6abc75aee22d-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.806075 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhzp5\" (UniqueName: \"kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-kube-api-access-qhzp5\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:02 crc kubenswrapper[4814]: I0122 05:35:02.818778 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.039304 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-xhppx"] Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.040235 4814 util.go:30] "No sandbox for pod can be found. 
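The etc-swift failure above (and its retries below) is a startup-ordering issue, not corruption: swift-storage-0's projected volume sources the swift-ring-files ConfigMap, which is only published once the swift-ring-rebalance job (added just above) has built the rings. A minimal sketch of such a projected volume using the upstream k8s.io/api/core/v1 types; the actual swift-storage-0 spec is not in the log, so this is an assumed illustration:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    // etcSwiftVolume sketches a projected volume like the one the log
    // reports failing: it sources the "swift-ring-files" ConfigMap, so
    // MountVolume.SetUp cannot succeed until that ConfigMap exists.
    func etcSwiftVolume() corev1.Volume {
        return corev1.Volume{
            Name: "etc-swift",
            VolumeSource: corev1.VolumeSource{
                Projected: &corev1.ProjectedVolumeSource{
                    Sources: []corev1.VolumeProjection{{
                        ConfigMap: &corev1.ConfigMapProjection{
                            LocalObjectReference: corev1.LocalObjectReference{
                                Name: "swift-ring-files",
                            },
                        },
                    }},
                },
            },
        }
    }

    func main() {
        v := etcSwiftVolume()
        fmt.Println(v.Name, "sources ConfigMap", v.Projected.Sources[0].ConfigMap.Name)
    }

A projected volume is materialized atomically, so one missing source fails SetUp for the whole volume, which is exactly the error repeated in this log.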
Need to start a new one" pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.046041 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.046248 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.046398 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.053059 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-xhppx"] Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.096756 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfqfm\" (UniqueName: \"kubernetes.io/projected/657e06e5-a5ca-4104-bc6c-12c31d9a1984-kube-api-access-zfqfm\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.097039 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-combined-ca-bundle\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.097061 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/657e06e5-a5ca-4104-bc6c-12c31d9a1984-scripts\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.097083 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-swiftconf\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.097104 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-dispersionconf\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.097136 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/657e06e5-a5ca-4104-bc6c-12c31d9a1984-etc-swift\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.097192 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/657e06e5-a5ca-4104-bc6c-12c31d9a1984-ring-data-devices\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 
05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.198542 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/657e06e5-a5ca-4104-bc6c-12c31d9a1984-ring-data-devices\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.199357 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/657e06e5-a5ca-4104-bc6c-12c31d9a1984-ring-data-devices\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.200512 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfqfm\" (UniqueName: \"kubernetes.io/projected/657e06e5-a5ca-4104-bc6c-12c31d9a1984-kube-api-access-zfqfm\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.200734 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-combined-ca-bundle\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.201288 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/657e06e5-a5ca-4104-bc6c-12c31d9a1984-scripts\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.201396 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-swiftconf\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.201696 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-dispersionconf\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.201814 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/657e06e5-a5ca-4104-bc6c-12c31d9a1984-etc-swift\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.202017 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/657e06e5-a5ca-4104-bc6c-12c31d9a1984-scripts\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.202290 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/657e06e5-a5ca-4104-bc6c-12c31d9a1984-etc-swift\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.204864 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-dispersionconf\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.206339 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-combined-ca-bundle\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.212253 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-swiftconf\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.228039 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfqfm\" (UniqueName: \"kubernetes.io/projected/657e06e5-a5ca-4104-bc6c-12c31d9a1984-kube-api-access-zfqfm\") pod \"swift-ring-rebalance-xhppx\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.303152 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:03 crc kubenswrapper[4814]: E0122 05:35:03.303312 4814 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 05:35:03 crc kubenswrapper[4814]: E0122 05:35:03.303349 4814 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 05:35:03 crc kubenswrapper[4814]: E0122 05:35:03.303401 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift podName:e7eb7182-d869-4625-99e2-6abc75aee22d nodeName:}" failed. No retries permitted until 2026-01-22 05:35:04.303385696 +0000 UTC m=+990.386873911 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift") pod "swift-storage-0" (UID: "e7eb7182-d869-4625-99e2-6abc75aee22d") : configmap "swift-ring-files" not found Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.322136 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.357330 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.404187 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-config\") pod \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.404322 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-ovsdbserver-nb\") pod \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.404378 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-dns-svc\") pod \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.404453 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hfhk\" (UniqueName: \"kubernetes.io/projected/facb7dd2-b0d1-457f-9b21-900a2132b3fd-kube-api-access-6hfhk\") pod \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\" (UID: \"facb7dd2-b0d1-457f-9b21-900a2132b3fd\") " Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.410455 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/facb7dd2-b0d1-457f-9b21-900a2132b3fd-kube-api-access-6hfhk" (OuterVolumeSpecName: "kube-api-access-6hfhk") pod "facb7dd2-b0d1-457f-9b21-900a2132b3fd" (UID: "facb7dd2-b0d1-457f-9b21-900a2132b3fd"). InnerVolumeSpecName "kube-api-access-6hfhk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.424591 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" event={"ID":"facb7dd2-b0d1-457f-9b21-900a2132b3fd","Type":"ContainerDied","Data":"ea36b16c5bcb29db4397e9db908b36a8c37cd8dca806cf53c4e89ef9e9824119"} Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.424652 4814 scope.go:117] "RemoveContainer" containerID="4b6bc12f16f00e0ef83b120206570c912ad93627b3f3b13ad461e93ae06d8183" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.424791 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-4wjwc" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.438394 4814 generic.go:334] "Generic (PLEG): container finished" podID="a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" containerID="31c4ea1a6e886e542e501677a943e1b7a8b0732519bc9e9a6940f40a45a4d197" exitCode=0 Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.438435 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-tkzhv" event={"ID":"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33","Type":"ContainerDied","Data":"31c4ea1a6e886e542e501677a943e1b7a8b0732519bc9e9a6940f40a45a4d197"} Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.482436 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-config" (OuterVolumeSpecName: "config") pod "facb7dd2-b0d1-457f-9b21-900a2132b3fd" (UID: "facb7dd2-b0d1-457f-9b21-900a2132b3fd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.487966 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.507264 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "facb7dd2-b0d1-457f-9b21-900a2132b3fd" (UID: "facb7dd2-b0d1-457f-9b21-900a2132b3fd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.508077 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hfhk\" (UniqueName: \"kubernetes.io/projected/facb7dd2-b0d1-457f-9b21-900a2132b3fd-kube-api-access-6hfhk\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.508099 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.508110 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.543684 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "facb7dd2-b0d1-457f-9b21-900a2132b3fd" (UID: "facb7dd2-b0d1-457f-9b21-900a2132b3fd"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.628520 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/facb7dd2-b0d1-457f-9b21-900a2132b3fd-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.645063 4814 scope.go:117] "RemoveContainer" containerID="262d3bd3a6fae72e5fb1c70661bede5cb173fae6ef906d7fed2ee94f9cf2692c" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.690408 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.789271 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-4wjwc"] Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.794933 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-4wjwc"] Jan 22 05:35:03 crc kubenswrapper[4814]: I0122 05:35:03.959187 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-xhppx"] Jan 22 05:35:03 crc kubenswrapper[4814]: W0122 05:35:03.962330 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod657e06e5_a5ca_4104_bc6c_12c31d9a1984.slice/crio-8dcf0fef3925eb88ba426da17108d5652ad42f7bd550249ee8feaf4666947d91 WatchSource:0}: Error finding container 8dcf0fef3925eb88ba426da17108d5652ad42f7bd550249ee8feaf4666947d91: Status 404 returned error can't find the container with id 8dcf0fef3925eb88ba426da17108d5652ad42f7bd550249ee8feaf4666947d91 Jan 22 05:35:04 crc kubenswrapper[4814]: I0122 05:35:04.199215 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 22 05:35:04 crc kubenswrapper[4814]: I0122 05:35:04.339065 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:04 crc kubenswrapper[4814]: E0122 05:35:04.339282 4814 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 05:35:04 crc kubenswrapper[4814]: E0122 05:35:04.339302 4814 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 05:35:04 crc kubenswrapper[4814]: E0122 05:35:04.339350 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift podName:e7eb7182-d869-4625-99e2-6abc75aee22d nodeName:}" failed. No retries permitted until 2026-01-22 05:35:06.339331941 +0000 UTC m=+992.422820156 (durationBeforeRetry 2s). 
Jan 22 05:35:04 crc kubenswrapper[4814]: I0122 05:35:04.339065 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0"
Jan 22 05:35:04 crc kubenswrapper[4814]: E0122 05:35:04.339282 4814 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 22 05:35:04 crc kubenswrapper[4814]: E0122 05:35:04.339302 4814 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 22 05:35:04 crc kubenswrapper[4814]: E0122 05:35:04.339350 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift podName:e7eb7182-d869-4625-99e2-6abc75aee22d nodeName:}" failed. No retries permitted until 2026-01-22 05:35:06.339331941 +0000 UTC m=+992.422820156 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift") pod "swift-storage-0" (UID: "e7eb7182-d869-4625-99e2-6abc75aee22d") : configmap "swift-ring-files" not found
Jan 22 05:35:04 crc kubenswrapper[4814]: I0122 05:35:04.359548 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="facb7dd2-b0d1-457f-9b21-900a2132b3fd" path="/var/lib/kubelet/pods/facb7dd2-b0d1-457f-9b21-900a2132b3fd/volumes"
Jan 22 05:35:04 crc kubenswrapper[4814]: I0122 05:35:04.447491 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xhppx" event={"ID":"657e06e5-a5ca-4104-bc6c-12c31d9a1984","Type":"ContainerStarted","Data":"8dcf0fef3925eb88ba426da17108d5652ad42f7bd550249ee8feaf4666947d91"}
Jan 22 05:35:04 crc kubenswrapper[4814]: I0122 05:35:04.449245 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-tkzhv" event={"ID":"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33","Type":"ContainerStarted","Data":"6bc0e39c263a87bf5acd852a15127c3e30522ce1b8d70bc9c8199022bad57eaf"}
Jan 22 05:35:04 crc kubenswrapper[4814]: I0122 05:35:04.449387 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-tkzhv"
Jan 22 05:35:04 crc kubenswrapper[4814]: I0122 05:35:04.785228 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Jan 22 05:35:04 crc kubenswrapper[4814]: I0122 05:35:04.814677 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-tkzhv" podStartSLOduration=3.814661712 podStartE2EDuration="3.814661712s" podCreationTimestamp="2026-01-22 05:35:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:35:04.502477027 +0000 UTC m=+990.585965242" watchObservedRunningTime="2026-01-22 05:35:04.814661712 +0000 UTC m=+990.898149927"
Jan 22 05:35:04 crc kubenswrapper[4814]: I0122 05:35:04.894224 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.663331 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-z4slr"]
Jan 22 05:35:05 crc kubenswrapper[4814]: E0122 05:35:05.663660 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="facb7dd2-b0d1-457f-9b21-900a2132b3fd" containerName="dnsmasq-dns"
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.663673 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="facb7dd2-b0d1-457f-9b21-900a2132b3fd" containerName="dnsmasq-dns"
Jan 22 05:35:05 crc kubenswrapper[4814]: E0122 05:35:05.663718 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="facb7dd2-b0d1-457f-9b21-900a2132b3fd" containerName="init"
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.663724 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="facb7dd2-b0d1-457f-9b21-900a2132b3fd" containerName="init"
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.663916 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="facb7dd2-b0d1-457f-9b21-900a2132b3fd" containerName="dnsmasq-dns"
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.664422 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-z4slr"
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.667767 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret"
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.671834 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d670a1d9-c6a7-44c8-969b-168725f674cb-operator-scripts\") pod \"root-account-create-update-z4slr\" (UID: \"d670a1d9-c6a7-44c8-969b-168725f674cb\") " pod="openstack/root-account-create-update-z4slr"
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.671876 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fx4rz\" (UniqueName: \"kubernetes.io/projected/d670a1d9-c6a7-44c8-969b-168725f674cb-kube-api-access-fx4rz\") pod \"root-account-create-update-z4slr\" (UID: \"d670a1d9-c6a7-44c8-969b-168725f674cb\") " pod="openstack/root-account-create-update-z4slr"
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.672646 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-z4slr"]
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.773185 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d670a1d9-c6a7-44c8-969b-168725f674cb-operator-scripts\") pod \"root-account-create-update-z4slr\" (UID: \"d670a1d9-c6a7-44c8-969b-168725f674cb\") " pod="openstack/root-account-create-update-z4slr"
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.773232 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fx4rz\" (UniqueName: \"kubernetes.io/projected/d670a1d9-c6a7-44c8-969b-168725f674cb-kube-api-access-fx4rz\") pod \"root-account-create-update-z4slr\" (UID: \"d670a1d9-c6a7-44c8-969b-168725f674cb\") " pod="openstack/root-account-create-update-z4slr"
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.776714 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d670a1d9-c6a7-44c8-969b-168725f674cb-operator-scripts\") pod \"root-account-create-update-z4slr\" (UID: \"d670a1d9-c6a7-44c8-969b-168725f674cb\") " pod="openstack/root-account-create-update-z4slr"
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.804401 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fx4rz\" (UniqueName: \"kubernetes.io/projected/d670a1d9-c6a7-44c8-969b-168725f674cb-kube-api-access-fx4rz\") pod \"root-account-create-update-z4slr\" (UID: \"d670a1d9-c6a7-44c8-969b-168725f674cb\") " pod="openstack/root-account-create-update-z4slr"
Jan 22 05:35:05 crc kubenswrapper[4814]: I0122 05:35:05.997089 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-z4slr"
Jan 22 05:35:06 crc kubenswrapper[4814]: I0122 05:35:06.381578 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0"
Jan 22 05:35:06 crc kubenswrapper[4814]: E0122 05:35:06.381898 4814 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 22 05:35:06 crc kubenswrapper[4814]: E0122 05:35:06.381926 4814 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 22 05:35:06 crc kubenswrapper[4814]: E0122 05:35:06.381975 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift podName:e7eb7182-d869-4625-99e2-6abc75aee22d nodeName:}" failed. No retries permitted until 2026-01-22 05:35:10.381959323 +0000 UTC m=+996.465447538 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift") pod "swift-storage-0" (UID: "e7eb7182-d869-4625-99e2-6abc75aee22d") : configmap "swift-ring-files" not found
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.161103 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-b8mdj"]
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.162241 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-b8mdj"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.176732 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-b8mdj"]
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.220759 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b21045ad-7166-4a40-abb8-8337cbcb1220-operator-scripts\") pod \"keystone-db-create-b8mdj\" (UID: \"b21045ad-7166-4a40-abb8-8337cbcb1220\") " pod="openstack/keystone-db-create-b8mdj"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.297789 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-533f-account-create-update-w8ktx"]
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.300763 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-533f-account-create-update-w8ktx"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.303558 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.313193 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-533f-account-create-update-w8ktx"]
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.325100 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsph2\" (UniqueName: \"kubernetes.io/projected/b21045ad-7166-4a40-abb8-8337cbcb1220-kube-api-access-qsph2\") pod \"keystone-db-create-b8mdj\" (UID: \"b21045ad-7166-4a40-abb8-8337cbcb1220\") " pod="openstack/keystone-db-create-b8mdj"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.325140 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b21045ad-7166-4a40-abb8-8337cbcb1220-operator-scripts\") pod \"keystone-db-create-b8mdj\" (UID: \"b21045ad-7166-4a40-abb8-8337cbcb1220\") " pod="openstack/keystone-db-create-b8mdj"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.328451 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b21045ad-7166-4a40-abb8-8337cbcb1220-operator-scripts\") pod \"keystone-db-create-b8mdj\" (UID: \"b21045ad-7166-4a40-abb8-8337cbcb1220\") " pod="openstack/keystone-db-create-b8mdj"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.428874 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twxnm\" (UniqueName: \"kubernetes.io/projected/36d7b817-7906-48c6-ac41-d277d095531c-kube-api-access-twxnm\") pod \"keystone-533f-account-create-update-w8ktx\" (UID: \"36d7b817-7906-48c6-ac41-d277d095531c\") " pod="openstack/keystone-533f-account-create-update-w8ktx"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.429213 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsph2\" (UniqueName: \"kubernetes.io/projected/b21045ad-7166-4a40-abb8-8337cbcb1220-kube-api-access-qsph2\") pod \"keystone-db-create-b8mdj\" (UID: \"b21045ad-7166-4a40-abb8-8337cbcb1220\") " pod="openstack/keystone-db-create-b8mdj"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.429283 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36d7b817-7906-48c6-ac41-d277d095531c-operator-scripts\") pod \"keystone-533f-account-create-update-w8ktx\" (UID: \"36d7b817-7906-48c6-ac41-d277d095531c\") " pod="openstack/keystone-533f-account-create-update-w8ktx"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.453491 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsph2\" (UniqueName: \"kubernetes.io/projected/b21045ad-7166-4a40-abb8-8337cbcb1220-kube-api-access-qsph2\") pod \"keystone-db-create-b8mdj\" (UID: \"b21045ad-7166-4a40-abb8-8337cbcb1220\") " pod="openstack/keystone-db-create-b8mdj"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.476297 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xhppx" event={"ID":"657e06e5-a5ca-4104-bc6c-12c31d9a1984","Type":"ContainerStarted","Data":"c09342ec01b5ffc2a1e34a782fc4a5dd16705b2dbf951d681f6d60c33755fe2c"}
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.500283 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-xhppx" podStartSLOduration=2.240034864 podStartE2EDuration="6.500267742s" podCreationTimestamp="2026-01-22 05:35:02 +0000 UTC" firstStartedPulling="2026-01-22 05:35:03.964542866 +0000 UTC m=+990.048031081" lastFinishedPulling="2026-01-22 05:35:08.224775744 +0000 UTC m=+994.308263959" observedRunningTime="2026-01-22 05:35:08.494111861 +0000 UTC m=+994.577600076" watchObservedRunningTime="2026-01-22 05:35:08.500267742 +0000 UTC m=+994.583755957"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.532498 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twxnm\" (UniqueName: \"kubernetes.io/projected/36d7b817-7906-48c6-ac41-d277d095531c-kube-api-access-twxnm\") pod \"keystone-533f-account-create-update-w8ktx\" (UID: \"36d7b817-7906-48c6-ac41-d277d095531c\") " pod="openstack/keystone-533f-account-create-update-w8ktx"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.532553 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36d7b817-7906-48c6-ac41-d277d095531c-operator-scripts\") pod \"keystone-533f-account-create-update-w8ktx\" (UID: \"36d7b817-7906-48c6-ac41-d277d095531c\") " pod="openstack/keystone-533f-account-create-update-w8ktx"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.534759 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36d7b817-7906-48c6-ac41-d277d095531c-operator-scripts\") pod \"keystone-533f-account-create-update-w8ktx\" (UID: \"36d7b817-7906-48c6-ac41-d277d095531c\") " pod="openstack/keystone-533f-account-create-update-w8ktx"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.553822 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twxnm\" (UniqueName: \"kubernetes.io/projected/36d7b817-7906-48c6-ac41-d277d095531c-kube-api-access-twxnm\") pod \"keystone-533f-account-create-update-w8ktx\" (UID: \"36d7b817-7906-48c6-ac41-d277d095531c\") " pod="openstack/keystone-533f-account-create-update-w8ktx"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.565371 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-6wgx7"]
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.566299 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-6wgx7"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.574323 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-6wgx7"]
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.604499 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-b8mdj"
Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.672277 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-z4slr"]
Need to start a new one" pod="openstack/keystone-533f-account-create-update-w8ktx" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.741402 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc87bfbf-1c0b-4502-8ad7-8913a6099bf0-operator-scripts\") pod \"placement-db-create-6wgx7\" (UID: \"fc87bfbf-1c0b-4502-8ad7-8913a6099bf0\") " pod="openstack/placement-db-create-6wgx7" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.741672 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w427b\" (UniqueName: \"kubernetes.io/projected/fc87bfbf-1c0b-4502-8ad7-8913a6099bf0-kube-api-access-w427b\") pod \"placement-db-create-6wgx7\" (UID: \"fc87bfbf-1c0b-4502-8ad7-8913a6099bf0\") " pod="openstack/placement-db-create-6wgx7" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.790247 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-4c34-account-create-update-bjrjx"] Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.791447 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-4c34-account-create-update-bjrjx" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.795240 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.807294 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-4c34-account-create-update-bjrjx"] Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.852770 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2swp6\" (UniqueName: \"kubernetes.io/projected/1607a334-0e23-4696-8a95-e364d28fca56-kube-api-access-2swp6\") pod \"placement-4c34-account-create-update-bjrjx\" (UID: \"1607a334-0e23-4696-8a95-e364d28fca56\") " pod="openstack/placement-4c34-account-create-update-bjrjx" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.853096 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc87bfbf-1c0b-4502-8ad7-8913a6099bf0-operator-scripts\") pod \"placement-db-create-6wgx7\" (UID: \"fc87bfbf-1c0b-4502-8ad7-8913a6099bf0\") " pod="openstack/placement-db-create-6wgx7" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.853186 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w427b\" (UniqueName: \"kubernetes.io/projected/fc87bfbf-1c0b-4502-8ad7-8913a6099bf0-kube-api-access-w427b\") pod \"placement-db-create-6wgx7\" (UID: \"fc87bfbf-1c0b-4502-8ad7-8913a6099bf0\") " pod="openstack/placement-db-create-6wgx7" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.853234 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1607a334-0e23-4696-8a95-e364d28fca56-operator-scripts\") pod \"placement-4c34-account-create-update-bjrjx\" (UID: \"1607a334-0e23-4696-8a95-e364d28fca56\") " pod="openstack/placement-4c34-account-create-update-bjrjx" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.854515 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc87bfbf-1c0b-4502-8ad7-8913a6099bf0-operator-scripts\") pod 
\"placement-db-create-6wgx7\" (UID: \"fc87bfbf-1c0b-4502-8ad7-8913a6099bf0\") " pod="openstack/placement-db-create-6wgx7" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.890286 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w427b\" (UniqueName: \"kubernetes.io/projected/fc87bfbf-1c0b-4502-8ad7-8913a6099bf0-kube-api-access-w427b\") pod \"placement-db-create-6wgx7\" (UID: \"fc87bfbf-1c0b-4502-8ad7-8913a6099bf0\") " pod="openstack/placement-db-create-6wgx7" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.955569 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1607a334-0e23-4696-8a95-e364d28fca56-operator-scripts\") pod \"placement-4c34-account-create-update-bjrjx\" (UID: \"1607a334-0e23-4696-8a95-e364d28fca56\") " pod="openstack/placement-4c34-account-create-update-bjrjx" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.955672 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2swp6\" (UniqueName: \"kubernetes.io/projected/1607a334-0e23-4696-8a95-e364d28fca56-kube-api-access-2swp6\") pod \"placement-4c34-account-create-update-bjrjx\" (UID: \"1607a334-0e23-4696-8a95-e364d28fca56\") " pod="openstack/placement-4c34-account-create-update-bjrjx" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.956692 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1607a334-0e23-4696-8a95-e364d28fca56-operator-scripts\") pod \"placement-4c34-account-create-update-bjrjx\" (UID: \"1607a334-0e23-4696-8a95-e364d28fca56\") " pod="openstack/placement-4c34-account-create-update-bjrjx" Jan 22 05:35:08 crc kubenswrapper[4814]: I0122 05:35:08.974401 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2swp6\" (UniqueName: \"kubernetes.io/projected/1607a334-0e23-4696-8a95-e364d28fca56-kube-api-access-2swp6\") pod \"placement-4c34-account-create-update-bjrjx\" (UID: \"1607a334-0e23-4696-8a95-e364d28fca56\") " pod="openstack/placement-4c34-account-create-update-bjrjx" Jan 22 05:35:09 crc kubenswrapper[4814]: I0122 05:35:09.159421 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-b8mdj"] Jan 22 05:35:09 crc kubenswrapper[4814]: I0122 05:35:09.171820 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-4c34-account-create-update-bjrjx" Jan 22 05:35:09 crc kubenswrapper[4814]: I0122 05:35:09.184815 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-6wgx7" Jan 22 05:35:09 crc kubenswrapper[4814]: I0122 05:35:09.341547 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-533f-account-create-update-w8ktx"] Jan 22 05:35:09 crc kubenswrapper[4814]: W0122 05:35:09.375910 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod36d7b817_7906_48c6_ac41_d277d095531c.slice/crio-eacb0ab77be76f1b35ac798ffc8dc46a3db5497bf1a8b4396daade6918b8d422 WatchSource:0}: Error finding container eacb0ab77be76f1b35ac798ffc8dc46a3db5497bf1a8b4396daade6918b8d422: Status 404 returned error can't find the container with id eacb0ab77be76f1b35ac798ffc8dc46a3db5497bf1a8b4396daade6918b8d422 Jan 22 05:35:09 crc kubenswrapper[4814]: I0122 05:35:09.494091 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-533f-account-create-update-w8ktx" event={"ID":"36d7b817-7906-48c6-ac41-d277d095531c","Type":"ContainerStarted","Data":"eacb0ab77be76f1b35ac798ffc8dc46a3db5497bf1a8b4396daade6918b8d422"} Jan 22 05:35:09 crc kubenswrapper[4814]: I0122 05:35:09.495079 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-b8mdj" event={"ID":"b21045ad-7166-4a40-abb8-8337cbcb1220","Type":"ContainerStarted","Data":"58fe8cb4ecd4275bf87ccf4585454cce95f776ae5ff14645419b68e9a32e73a7"} Jan 22 05:35:09 crc kubenswrapper[4814]: I0122 05:35:09.495099 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-b8mdj" event={"ID":"b21045ad-7166-4a40-abb8-8337cbcb1220","Type":"ContainerStarted","Data":"beb69aacf0c0ef254e267d36f615db2275cdc776bc716dd3f41079cceb4b4960"} Jan 22 05:35:09 crc kubenswrapper[4814]: I0122 05:35:09.504466 4814 generic.go:334] "Generic (PLEG): container finished" podID="d670a1d9-c6a7-44c8-969b-168725f674cb" containerID="536da12983e15963b7518a37321f5ff40c64e2172e1bcc7103071af96b9535bc" exitCode=0 Jan 22 05:35:09 crc kubenswrapper[4814]: I0122 05:35:09.504567 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-z4slr" event={"ID":"d670a1d9-c6a7-44c8-969b-168725f674cb","Type":"ContainerDied","Data":"536da12983e15963b7518a37321f5ff40c64e2172e1bcc7103071af96b9535bc"} Jan 22 05:35:09 crc kubenswrapper[4814]: I0122 05:35:09.504607 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-z4slr" event={"ID":"d670a1d9-c6a7-44c8-969b-168725f674cb","Type":"ContainerStarted","Data":"bd768fb1c9f27dc6af49c90953cb4d036dd1f3cea834435ae861212b4a9039fd"} Jan 22 05:35:09 crc kubenswrapper[4814]: I0122 05:35:09.518647 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-b8mdj" podStartSLOduration=1.518614731 podStartE2EDuration="1.518614731s" podCreationTimestamp="2026-01-22 05:35:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:35:09.517194287 +0000 UTC m=+995.600682502" watchObservedRunningTime="2026-01-22 05:35:09.518614731 +0000 UTC m=+995.602102946" Jan 22 05:35:09 crc kubenswrapper[4814]: I0122 05:35:09.701735 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-4c34-account-create-update-bjrjx"] Jan 22 05:35:09 crc kubenswrapper[4814]: W0122 05:35:09.702837 4814 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1607a334_0e23_4696_8a95_e364d28fca56.slice/crio-072c5a97fe8a37bbb018b19c6dfec45560143315f2f70b2c4c615a525461bb22 WatchSource:0}: Error finding container 072c5a97fe8a37bbb018b19c6dfec45560143315f2f70b2c4c615a525461bb22: Status 404 returned error can't find the container with id 072c5a97fe8a37bbb018b19c6dfec45560143315f2f70b2c4c615a525461bb22 Jan 22 05:35:09 crc kubenswrapper[4814]: I0122 05:35:09.803688 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-6wgx7"] Jan 22 05:35:09 crc kubenswrapper[4814]: W0122 05:35:09.815112 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfc87bfbf_1c0b_4502_8ad7_8913a6099bf0.slice/crio-fb968f7bc8b94199fce6127a58e92c93abde0c1b2bd5f2f5d61eec8dea208e1d WatchSource:0}: Error finding container fb968f7bc8b94199fce6127a58e92c93abde0c1b2bd5f2f5d61eec8dea208e1d: Status 404 returned error can't find the container with id fb968f7bc8b94199fce6127a58e92c93abde0c1b2bd5f2f5d61eec8dea208e1d Jan 22 05:35:10 crc kubenswrapper[4814]: I0122 05:35:10.393582 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:10 crc kubenswrapper[4814]: E0122 05:35:10.393807 4814 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 22 05:35:10 crc kubenswrapper[4814]: E0122 05:35:10.394000 4814 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 22 05:35:10 crc kubenswrapper[4814]: E0122 05:35:10.394078 4814 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift podName:e7eb7182-d869-4625-99e2-6abc75aee22d nodeName:}" failed. No retries permitted until 2026-01-22 05:35:18.394048642 +0000 UTC m=+1004.477536867 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift") pod "swift-storage-0" (UID: "e7eb7182-d869-4625-99e2-6abc75aee22d") : configmap "swift-ring-files" not found Jan 22 05:35:10 crc kubenswrapper[4814]: I0122 05:35:10.522220 4814 generic.go:334] "Generic (PLEG): container finished" podID="fc87bfbf-1c0b-4502-8ad7-8913a6099bf0" containerID="daf8289d134302fe9ca2a77b129432b464d66ddefc88c9b0427d879625f34116" exitCode=0 Jan 22 05:35:10 crc kubenswrapper[4814]: I0122 05:35:10.522288 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-6wgx7" event={"ID":"fc87bfbf-1c0b-4502-8ad7-8913a6099bf0","Type":"ContainerDied","Data":"daf8289d134302fe9ca2a77b129432b464d66ddefc88c9b0427d879625f34116"} Jan 22 05:35:10 crc kubenswrapper[4814]: I0122 05:35:10.522316 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-6wgx7" event={"ID":"fc87bfbf-1c0b-4502-8ad7-8913a6099bf0","Type":"ContainerStarted","Data":"fb968f7bc8b94199fce6127a58e92c93abde0c1b2bd5f2f5d61eec8dea208e1d"} Jan 22 05:35:10 crc kubenswrapper[4814]: I0122 05:35:10.527205 4814 generic.go:334] "Generic (PLEG): container finished" podID="1607a334-0e23-4696-8a95-e364d28fca56" containerID="e1e3a3b7c7353a02d60300e36dee3a8f84f35c8a52f88100867d6d486fe787f0" exitCode=0 Jan 22 05:35:10 crc kubenswrapper[4814]: I0122 05:35:10.527265 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4c34-account-create-update-bjrjx" event={"ID":"1607a334-0e23-4696-8a95-e364d28fca56","Type":"ContainerDied","Data":"e1e3a3b7c7353a02d60300e36dee3a8f84f35c8a52f88100867d6d486fe787f0"} Jan 22 05:35:10 crc kubenswrapper[4814]: I0122 05:35:10.527286 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4c34-account-create-update-bjrjx" event={"ID":"1607a334-0e23-4696-8a95-e364d28fca56","Type":"ContainerStarted","Data":"072c5a97fe8a37bbb018b19c6dfec45560143315f2f70b2c4c615a525461bb22"} Jan 22 05:35:10 crc kubenswrapper[4814]: I0122 05:35:10.542291 4814 generic.go:334] "Generic (PLEG): container finished" podID="36d7b817-7906-48c6-ac41-d277d095531c" containerID="535a38b9def1366acf26b3372d3ce95e22f01f531ac03cac77134ffad5b26896" exitCode=0 Jan 22 05:35:10 crc kubenswrapper[4814]: I0122 05:35:10.542397 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-533f-account-create-update-w8ktx" event={"ID":"36d7b817-7906-48c6-ac41-d277d095531c","Type":"ContainerDied","Data":"535a38b9def1366acf26b3372d3ce95e22f01f531ac03cac77134ffad5b26896"} Jan 22 05:35:10 crc kubenswrapper[4814]: I0122 05:35:10.545884 4814 generic.go:334] "Generic (PLEG): container finished" podID="b21045ad-7166-4a40-abb8-8337cbcb1220" containerID="58fe8cb4ecd4275bf87ccf4585454cce95f776ae5ff14645419b68e9a32e73a7" exitCode=0 Jan 22 05:35:10 crc kubenswrapper[4814]: I0122 05:35:10.545945 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-b8mdj" event={"ID":"b21045ad-7166-4a40-abb8-8337cbcb1220","Type":"ContainerDied","Data":"58fe8cb4ecd4275bf87ccf4585454cce95f776ae5ff14645419b68e9a32e73a7"} Jan 22 05:35:10 crc kubenswrapper[4814]: I0122 05:35:10.921108 4814 util.go:48] "No ready sandbox for pod can be found. 
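The 8s entry above is the last backoff step captured in this excerpt; the mount keeps retrying until the swift-ring-rebalance job (running since 05:35:03) publishes the swift-ring-files ConfigMap. The kubelet handles the wait itself, but the same dependency can be watched from outside with client-go. A hedged sketch; the polling loop is illustrative and not what the kubelet does internally:

    package main

    import (
        "context"
        "fmt"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    // Polls until the ConfigMap that blocks swift-storage-0's etc-swift
    // volume exists. Once it does, the next mount retry will succeed.
    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        client, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        for {
            _, err := client.CoreV1().ConfigMaps("openstack").
                Get(context.TODO(), "swift-ring-files", metav1.GetOptions{})
            if err == nil {
                fmt.Println("swift-ring-files exists; etc-swift can now mount")
                return
            }
            time.Sleep(2 * time.Second)
        }
    }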
Need to start a new one" pod="openstack/root-account-create-update-z4slr" Jan 22 05:35:11 crc kubenswrapper[4814]: I0122 05:35:11.103147 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fx4rz\" (UniqueName: \"kubernetes.io/projected/d670a1d9-c6a7-44c8-969b-168725f674cb-kube-api-access-fx4rz\") pod \"d670a1d9-c6a7-44c8-969b-168725f674cb\" (UID: \"d670a1d9-c6a7-44c8-969b-168725f674cb\") " Jan 22 05:35:11 crc kubenswrapper[4814]: I0122 05:35:11.103240 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d670a1d9-c6a7-44c8-969b-168725f674cb-operator-scripts\") pod \"d670a1d9-c6a7-44c8-969b-168725f674cb\" (UID: \"d670a1d9-c6a7-44c8-969b-168725f674cb\") " Jan 22 05:35:11 crc kubenswrapper[4814]: I0122 05:35:11.105012 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d670a1d9-c6a7-44c8-969b-168725f674cb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d670a1d9-c6a7-44c8-969b-168725f674cb" (UID: "d670a1d9-c6a7-44c8-969b-168725f674cb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:11 crc kubenswrapper[4814]: I0122 05:35:11.107682 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d670a1d9-c6a7-44c8-969b-168725f674cb-kube-api-access-fx4rz" (OuterVolumeSpecName: "kube-api-access-fx4rz") pod "d670a1d9-c6a7-44c8-969b-168725f674cb" (UID: "d670a1d9-c6a7-44c8-969b-168725f674cb"). InnerVolumeSpecName "kube-api-access-fx4rz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:11 crc kubenswrapper[4814]: I0122 05:35:11.205520 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fx4rz\" (UniqueName: \"kubernetes.io/projected/d670a1d9-c6a7-44c8-969b-168725f674cb-kube-api-access-fx4rz\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:11 crc kubenswrapper[4814]: I0122 05:35:11.205561 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d670a1d9-c6a7-44c8-969b-168725f674cb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:11 crc kubenswrapper[4814]: I0122 05:35:11.500865 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:11 crc kubenswrapper[4814]: I0122 05:35:11.560190 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-z4slr" Jan 22 05:35:11 crc kubenswrapper[4814]: I0122 05:35:11.560973 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-z4slr" event={"ID":"d670a1d9-c6a7-44c8-969b-168725f674cb","Type":"ContainerDied","Data":"bd768fb1c9f27dc6af49c90953cb4d036dd1f3cea834435ae861212b4a9039fd"} Jan 22 05:35:11 crc kubenswrapper[4814]: I0122 05:35:11.561015 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd768fb1c9f27dc6af49c90953cb4d036dd1f3cea834435ae861212b4a9039fd" Jan 22 05:35:11 crc kubenswrapper[4814]: I0122 05:35:11.606296 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-2jsgn"] Jan 22 05:35:11 crc kubenswrapper[4814]: I0122 05:35:11.606583 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" podUID="d604dc72-ca2a-4ef5-bed1-07a12ce10183" containerName="dnsmasq-dns" containerID="cri-o://8b3d5f23d0ea51d2df9e5d5fad04b1a611af3a27133eaafb4e87e272596986df" gracePeriod=10 Jan 22 05:35:11 crc kubenswrapper[4814]: I0122 05:35:11.897754 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-4c34-account-create-update-bjrjx" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.023579 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1607a334-0e23-4696-8a95-e364d28fca56-operator-scripts\") pod \"1607a334-0e23-4696-8a95-e364d28fca56\" (UID: \"1607a334-0e23-4696-8a95-e364d28fca56\") " Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.023759 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2swp6\" (UniqueName: \"kubernetes.io/projected/1607a334-0e23-4696-8a95-e364d28fca56-kube-api-access-2swp6\") pod \"1607a334-0e23-4696-8a95-e364d28fca56\" (UID: \"1607a334-0e23-4696-8a95-e364d28fca56\") " Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.024660 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1607a334-0e23-4696-8a95-e364d28fca56-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1607a334-0e23-4696-8a95-e364d28fca56" (UID: "1607a334-0e23-4696-8a95-e364d28fca56"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.031818 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1607a334-0e23-4696-8a95-e364d28fca56-kube-api-access-2swp6" (OuterVolumeSpecName: "kube-api-access-2swp6") pod "1607a334-0e23-4696-8a95-e364d28fca56" (UID: "1607a334-0e23-4696-8a95-e364d28fca56"). InnerVolumeSpecName "kube-api-access-2swp6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.126515 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1607a334-0e23-4696-8a95-e364d28fca56-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.126555 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2swp6\" (UniqueName: \"kubernetes.io/projected/1607a334-0e23-4696-8a95-e364d28fca56-kube-api-access-2swp6\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.234401 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-z4slr"] Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.251337 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-z4slr"] Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.313400 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-b8mdj" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.336851 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-533f-account-create-update-w8ktx" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.343701 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-6wgx7" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.355168 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.357046 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d670a1d9-c6a7-44c8-969b-168725f674cb" path="/var/lib/kubelet/pods/d670a1d9-c6a7-44c8-969b-168725f674cb/volumes" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.438278 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsph2\" (UniqueName: \"kubernetes.io/projected/b21045ad-7166-4a40-abb8-8337cbcb1220-kube-api-access-qsph2\") pod \"b21045ad-7166-4a40-abb8-8337cbcb1220\" (UID: \"b21045ad-7166-4a40-abb8-8337cbcb1220\") " Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.438475 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36d7b817-7906-48c6-ac41-d277d095531c-operator-scripts\") pod \"36d7b817-7906-48c6-ac41-d277d095531c\" (UID: \"36d7b817-7906-48c6-ac41-d277d095531c\") " Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.438602 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twxnm\" (UniqueName: \"kubernetes.io/projected/36d7b817-7906-48c6-ac41-d277d095531c-kube-api-access-twxnm\") pod \"36d7b817-7906-48c6-ac41-d277d095531c\" (UID: \"36d7b817-7906-48c6-ac41-d277d095531c\") " Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.438665 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b21045ad-7166-4a40-abb8-8337cbcb1220-operator-scripts\") pod \"b21045ad-7166-4a40-abb8-8337cbcb1220\" (UID: \"b21045ad-7166-4a40-abb8-8337cbcb1220\") " Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.438972 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/36d7b817-7906-48c6-ac41-d277d095531c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "36d7b817-7906-48c6-ac41-d277d095531c" (UID: "36d7b817-7906-48c6-ac41-d277d095531c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.439145 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36d7b817-7906-48c6-ac41-d277d095531c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.439381 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b21045ad-7166-4a40-abb8-8337cbcb1220-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b21045ad-7166-4a40-abb8-8337cbcb1220" (UID: "b21045ad-7166-4a40-abb8-8337cbcb1220"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.442417 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36d7b817-7906-48c6-ac41-d277d095531c-kube-api-access-twxnm" (OuterVolumeSpecName: "kube-api-access-twxnm") pod "36d7b817-7906-48c6-ac41-d277d095531c" (UID: "36d7b817-7906-48c6-ac41-d277d095531c"). InnerVolumeSpecName "kube-api-access-twxnm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.443415 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b21045ad-7166-4a40-abb8-8337cbcb1220-kube-api-access-qsph2" (OuterVolumeSpecName: "kube-api-access-qsph2") pod "b21045ad-7166-4a40-abb8-8337cbcb1220" (UID: "b21045ad-7166-4a40-abb8-8337cbcb1220"). InnerVolumeSpecName "kube-api-access-qsph2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.540190 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-ovsdbserver-nb\") pod \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.540237 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-dns-svc\") pod \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.540328 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w427b\" (UniqueName: \"kubernetes.io/projected/fc87bfbf-1c0b-4502-8ad7-8913a6099bf0-kube-api-access-w427b\") pod \"fc87bfbf-1c0b-4502-8ad7-8913a6099bf0\" (UID: \"fc87bfbf-1c0b-4502-8ad7-8913a6099bf0\") " Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.540380 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-ovsdbserver-sb\") pod \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.540415 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc87bfbf-1c0b-4502-8ad7-8913a6099bf0-operator-scripts\") pod \"fc87bfbf-1c0b-4502-8ad7-8913a6099bf0\" (UID: \"fc87bfbf-1c0b-4502-8ad7-8913a6099bf0\") " Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.540463 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8srwl\" (UniqueName: \"kubernetes.io/projected/d604dc72-ca2a-4ef5-bed1-07a12ce10183-kube-api-access-8srwl\") pod \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.540530 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-config\") pod \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\" (UID: \"d604dc72-ca2a-4ef5-bed1-07a12ce10183\") " Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.540859 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsph2\" (UniqueName: \"kubernetes.io/projected/b21045ad-7166-4a40-abb8-8337cbcb1220-kube-api-access-qsph2\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.540870 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twxnm\" (UniqueName: \"kubernetes.io/projected/36d7b817-7906-48c6-ac41-d277d095531c-kube-api-access-twxnm\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.540881 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b21045ad-7166-4a40-abb8-8337cbcb1220-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.541698 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/fc87bfbf-1c0b-4502-8ad7-8913a6099bf0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fc87bfbf-1c0b-4502-8ad7-8913a6099bf0" (UID: "fc87bfbf-1c0b-4502-8ad7-8913a6099bf0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.543713 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d604dc72-ca2a-4ef5-bed1-07a12ce10183-kube-api-access-8srwl" (OuterVolumeSpecName: "kube-api-access-8srwl") pod "d604dc72-ca2a-4ef5-bed1-07a12ce10183" (UID: "d604dc72-ca2a-4ef5-bed1-07a12ce10183"). InnerVolumeSpecName "kube-api-access-8srwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.546001 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc87bfbf-1c0b-4502-8ad7-8913a6099bf0-kube-api-access-w427b" (OuterVolumeSpecName: "kube-api-access-w427b") pod "fc87bfbf-1c0b-4502-8ad7-8913a6099bf0" (UID: "fc87bfbf-1c0b-4502-8ad7-8913a6099bf0"). InnerVolumeSpecName "kube-api-access-w427b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.568320 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4c34-account-create-update-bjrjx" event={"ID":"1607a334-0e23-4696-8a95-e364d28fca56","Type":"ContainerDied","Data":"072c5a97fe8a37bbb018b19c6dfec45560143315f2f70b2c4c615a525461bb22"} Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.568364 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="072c5a97fe8a37bbb018b19c6dfec45560143315f2f70b2c4c615a525461bb22" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.568615 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-4c34-account-create-update-bjrjx" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.569849 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-533f-account-create-update-w8ktx" event={"ID":"36d7b817-7906-48c6-ac41-d277d095531c","Type":"ContainerDied","Data":"eacb0ab77be76f1b35ac798ffc8dc46a3db5497bf1a8b4396daade6918b8d422"} Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.569886 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eacb0ab77be76f1b35ac798ffc8dc46a3db5497bf1a8b4396daade6918b8d422" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.569887 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-533f-account-create-update-w8ktx" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.571149 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-b8mdj" event={"ID":"b21045ad-7166-4a40-abb8-8337cbcb1220","Type":"ContainerDied","Data":"beb69aacf0c0ef254e267d36f615db2275cdc776bc716dd3f41079cceb4b4960"} Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.571168 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="beb69aacf0c0ef254e267d36f615db2275cdc776bc716dd3f41079cceb4b4960" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.571219 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-b8mdj" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.574334 4814 generic.go:334] "Generic (PLEG): container finished" podID="d604dc72-ca2a-4ef5-bed1-07a12ce10183" containerID="8b3d5f23d0ea51d2df9e5d5fad04b1a611af3a27133eaafb4e87e272596986df" exitCode=0 Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.574581 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.575324 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" event={"ID":"d604dc72-ca2a-4ef5-bed1-07a12ce10183","Type":"ContainerDied","Data":"8b3d5f23d0ea51d2df9e5d5fad04b1a611af3a27133eaafb4e87e272596986df"} Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.575363 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-2jsgn" event={"ID":"d604dc72-ca2a-4ef5-bed1-07a12ce10183","Type":"ContainerDied","Data":"79dba71644577a0015de4b1da77c019f365ada1216a764204d17827dcbb489f4"} Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.575385 4814 scope.go:117] "RemoveContainer" containerID="8b3d5f23d0ea51d2df9e5d5fad04b1a611af3a27133eaafb4e87e272596986df" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.576786 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d604dc72-ca2a-4ef5-bed1-07a12ce10183" (UID: "d604dc72-ca2a-4ef5-bed1-07a12ce10183"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.580656 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-6wgx7" event={"ID":"fc87bfbf-1c0b-4502-8ad7-8913a6099bf0","Type":"ContainerDied","Data":"fb968f7bc8b94199fce6127a58e92c93abde0c1b2bd5f2f5d61eec8dea208e1d"} Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.580771 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb968f7bc8b94199fce6127a58e92c93abde0c1b2bd5f2f5d61eec8dea208e1d" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.580882 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-6wgx7" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.582915 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-config" (OuterVolumeSpecName: "config") pod "d604dc72-ca2a-4ef5-bed1-07a12ce10183" (UID: "d604dc72-ca2a-4ef5-bed1-07a12ce10183"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.598948 4814 scope.go:117] "RemoveContainer" containerID="716d1d10c5f06ff44339e8e2e3908cc1e7b4a5fe151ad239ccf304488afe2a6a" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.600294 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d604dc72-ca2a-4ef5-bed1-07a12ce10183" (UID: "d604dc72-ca2a-4ef5-bed1-07a12ce10183"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.614479 4814 scope.go:117] "RemoveContainer" containerID="8b3d5f23d0ea51d2df9e5d5fad04b1a611af3a27133eaafb4e87e272596986df" Jan 22 05:35:12 crc kubenswrapper[4814]: E0122 05:35:12.615175 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b3d5f23d0ea51d2df9e5d5fad04b1a611af3a27133eaafb4e87e272596986df\": container with ID starting with 8b3d5f23d0ea51d2df9e5d5fad04b1a611af3a27133eaafb4e87e272596986df not found: ID does not exist" containerID="8b3d5f23d0ea51d2df9e5d5fad04b1a611af3a27133eaafb4e87e272596986df" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.615205 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b3d5f23d0ea51d2df9e5d5fad04b1a611af3a27133eaafb4e87e272596986df"} err="failed to get container status \"8b3d5f23d0ea51d2df9e5d5fad04b1a611af3a27133eaafb4e87e272596986df\": rpc error: code = NotFound desc = could not find container \"8b3d5f23d0ea51d2df9e5d5fad04b1a611af3a27133eaafb4e87e272596986df\": container with ID starting with 8b3d5f23d0ea51d2df9e5d5fad04b1a611af3a27133eaafb4e87e272596986df not found: ID does not exist" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.615226 4814 scope.go:117] "RemoveContainer" containerID="716d1d10c5f06ff44339e8e2e3908cc1e7b4a5fe151ad239ccf304488afe2a6a" Jan 22 05:35:12 crc kubenswrapper[4814]: E0122 05:35:12.615635 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"716d1d10c5f06ff44339e8e2e3908cc1e7b4a5fe151ad239ccf304488afe2a6a\": container with ID starting with 716d1d10c5f06ff44339e8e2e3908cc1e7b4a5fe151ad239ccf304488afe2a6a not found: ID does not exist" containerID="716d1d10c5f06ff44339e8e2e3908cc1e7b4a5fe151ad239ccf304488afe2a6a" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.615657 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"716d1d10c5f06ff44339e8e2e3908cc1e7b4a5fe151ad239ccf304488afe2a6a"} err="failed to get container status \"716d1d10c5f06ff44339e8e2e3908cc1e7b4a5fe151ad239ccf304488afe2a6a\": rpc error: code = NotFound desc = could not find container \"716d1d10c5f06ff44339e8e2e3908cc1e7b4a5fe151ad239ccf304488afe2a6a\": container with ID starting with 716d1d10c5f06ff44339e8e2e3908cc1e7b4a5fe151ad239ccf304488afe2a6a not found: ID does not exist" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.616818 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d604dc72-ca2a-4ef5-bed1-07a12ce10183" (UID: "d604dc72-ca2a-4ef5-bed1-07a12ce10183"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.642460 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.642493 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc87bfbf-1c0b-4502-8ad7-8913a6099bf0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.642503 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8srwl\" (UniqueName: \"kubernetes.io/projected/d604dc72-ca2a-4ef5-bed1-07a12ce10183-kube-api-access-8srwl\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.642514 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.642522 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.642531 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d604dc72-ca2a-4ef5-bed1-07a12ce10183-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.642540 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w427b\" (UniqueName: \"kubernetes.io/projected/fc87bfbf-1c0b-4502-8ad7-8913a6099bf0-kube-api-access-w427b\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.906784 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-2jsgn"] Jan 22 05:35:12 crc kubenswrapper[4814]: I0122 05:35:12.911800 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-2jsgn"] Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.932344 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-h6rcw"] Jan 22 05:35:13 crc kubenswrapper[4814]: E0122 05:35:13.933222 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d670a1d9-c6a7-44c8-969b-168725f674cb" containerName="mariadb-account-create-update" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.933242 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d670a1d9-c6a7-44c8-969b-168725f674cb" containerName="mariadb-account-create-update" Jan 22 05:35:13 crc kubenswrapper[4814]: E0122 05:35:13.933275 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36d7b817-7906-48c6-ac41-d277d095531c" containerName="mariadb-account-create-update" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.933281 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="36d7b817-7906-48c6-ac41-d277d095531c" containerName="mariadb-account-create-update" Jan 22 05:35:13 crc kubenswrapper[4814]: E0122 05:35:13.933303 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1607a334-0e23-4696-8a95-e364d28fca56" containerName="mariadb-account-create-update" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.933308 4814 
state_mem.go:107] "Deleted CPUSet assignment" podUID="1607a334-0e23-4696-8a95-e364d28fca56" containerName="mariadb-account-create-update" Jan 22 05:35:13 crc kubenswrapper[4814]: E0122 05:35:13.933324 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b21045ad-7166-4a40-abb8-8337cbcb1220" containerName="mariadb-database-create" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.933331 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="b21045ad-7166-4a40-abb8-8337cbcb1220" containerName="mariadb-database-create" Jan 22 05:35:13 crc kubenswrapper[4814]: E0122 05:35:13.933345 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d604dc72-ca2a-4ef5-bed1-07a12ce10183" containerName="dnsmasq-dns" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.933351 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d604dc72-ca2a-4ef5-bed1-07a12ce10183" containerName="dnsmasq-dns" Jan 22 05:35:13 crc kubenswrapper[4814]: E0122 05:35:13.933381 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d604dc72-ca2a-4ef5-bed1-07a12ce10183" containerName="init" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.933387 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d604dc72-ca2a-4ef5-bed1-07a12ce10183" containerName="init" Jan 22 05:35:13 crc kubenswrapper[4814]: E0122 05:35:13.933405 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc87bfbf-1c0b-4502-8ad7-8913a6099bf0" containerName="mariadb-database-create" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.933411 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc87bfbf-1c0b-4502-8ad7-8913a6099bf0" containerName="mariadb-database-create" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.933745 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="b21045ad-7166-4a40-abb8-8337cbcb1220" containerName="mariadb-database-create" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.933766 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="1607a334-0e23-4696-8a95-e364d28fca56" containerName="mariadb-account-create-update" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.933787 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="d670a1d9-c6a7-44c8-969b-168725f674cb" containerName="mariadb-account-create-update" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.933799 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc87bfbf-1c0b-4502-8ad7-8913a6099bf0" containerName="mariadb-database-create" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.933808 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="d604dc72-ca2a-4ef5-bed1-07a12ce10183" containerName="dnsmasq-dns" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.933819 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="36d7b817-7906-48c6-ac41-d277d095531c" containerName="mariadb-account-create-update" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.934518 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-h6rcw" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.948054 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-ecb3-account-create-update-72l64"] Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.950532 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-ecb3-account-create-update-72l64" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.958267 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.958329 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-ecb3-account-create-update-72l64"] Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.964216 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrfsq\" (UniqueName: \"kubernetes.io/projected/508ac5a0-f0fa-4e7c-bb63-1fac194d7545-kube-api-access-jrfsq\") pod \"glance-db-create-h6rcw\" (UID: \"508ac5a0-f0fa-4e7c-bb63-1fac194d7545\") " pod="openstack/glance-db-create-h6rcw" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.964287 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9pxd\" (UniqueName: \"kubernetes.io/projected/ec22ee17-7d0d-45fe-9059-0d8f059ee212-kube-api-access-v9pxd\") pod \"glance-ecb3-account-create-update-72l64\" (UID: \"ec22ee17-7d0d-45fe-9059-0d8f059ee212\") " pod="openstack/glance-ecb3-account-create-update-72l64" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.964343 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec22ee17-7d0d-45fe-9059-0d8f059ee212-operator-scripts\") pod \"glance-ecb3-account-create-update-72l64\" (UID: \"ec22ee17-7d0d-45fe-9059-0d8f059ee212\") " pod="openstack/glance-ecb3-account-create-update-72l64" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.964385 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/508ac5a0-f0fa-4e7c-bb63-1fac194d7545-operator-scripts\") pod \"glance-db-create-h6rcw\" (UID: \"508ac5a0-f0fa-4e7c-bb63-1fac194d7545\") " pod="openstack/glance-db-create-h6rcw" Jan 22 05:35:13 crc kubenswrapper[4814]: I0122 05:35:13.971374 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-h6rcw"] Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.065427 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrfsq\" (UniqueName: \"kubernetes.io/projected/508ac5a0-f0fa-4e7c-bb63-1fac194d7545-kube-api-access-jrfsq\") pod \"glance-db-create-h6rcw\" (UID: \"508ac5a0-f0fa-4e7c-bb63-1fac194d7545\") " pod="openstack/glance-db-create-h6rcw" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.065481 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9pxd\" (UniqueName: \"kubernetes.io/projected/ec22ee17-7d0d-45fe-9059-0d8f059ee212-kube-api-access-v9pxd\") pod \"glance-ecb3-account-create-update-72l64\" (UID: \"ec22ee17-7d0d-45fe-9059-0d8f059ee212\") " pod="openstack/glance-ecb3-account-create-update-72l64" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.065531 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec22ee17-7d0d-45fe-9059-0d8f059ee212-operator-scripts\") pod \"glance-ecb3-account-create-update-72l64\" (UID: \"ec22ee17-7d0d-45fe-9059-0d8f059ee212\") " pod="openstack/glance-ecb3-account-create-update-72l64" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 
05:35:14.065565 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/508ac5a0-f0fa-4e7c-bb63-1fac194d7545-operator-scripts\") pod \"glance-db-create-h6rcw\" (UID: \"508ac5a0-f0fa-4e7c-bb63-1fac194d7545\") " pod="openstack/glance-db-create-h6rcw" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.066248 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/508ac5a0-f0fa-4e7c-bb63-1fac194d7545-operator-scripts\") pod \"glance-db-create-h6rcw\" (UID: \"508ac5a0-f0fa-4e7c-bb63-1fac194d7545\") " pod="openstack/glance-db-create-h6rcw" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.066443 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec22ee17-7d0d-45fe-9059-0d8f059ee212-operator-scripts\") pod \"glance-ecb3-account-create-update-72l64\" (UID: \"ec22ee17-7d0d-45fe-9059-0d8f059ee212\") " pod="openstack/glance-ecb3-account-create-update-72l64" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.082297 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrfsq\" (UniqueName: \"kubernetes.io/projected/508ac5a0-f0fa-4e7c-bb63-1fac194d7545-kube-api-access-jrfsq\") pod \"glance-db-create-h6rcw\" (UID: \"508ac5a0-f0fa-4e7c-bb63-1fac194d7545\") " pod="openstack/glance-db-create-h6rcw" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.117127 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9pxd\" (UniqueName: \"kubernetes.io/projected/ec22ee17-7d0d-45fe-9059-0d8f059ee212-kube-api-access-v9pxd\") pod \"glance-ecb3-account-create-update-72l64\" (UID: \"ec22ee17-7d0d-45fe-9059-0d8f059ee212\") " pod="openstack/glance-ecb3-account-create-update-72l64" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.281023 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-h6rcw" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.296379 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5ll9n" podUID="c9c3c821-607f-4f2d-8b28-ae58bce1864d" containerName="ovn-controller" probeResult="failure" output=< Jan 22 05:35:14 crc kubenswrapper[4814]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 22 05:35:14 crc kubenswrapper[4814]: > Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.300803 4814 util.go:30] "No sandbox for pod can be found. 
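
The "Probe failed" record above is the one multi-line shape in this capture: the probe output is wrapped in output=< ... > and each continuation line carries its own journald prefix but no klog header. A line-oriented reader has to stitch those records back together before matching on the message; a sketch under that assumption:

import re

# Journald prefix shared by every line, including continuations.
PREFIX_RE = re.compile(r'^\w{3} \d{2} \d{2}:\d{2}:\d{2} \S+ kubenswrapper\[\d+\]: ')

def logical_records(lines):
    """Yield one string per logical kubelet record, re-joining output=< ... > blocks."""
    pending = None
    for raw in lines:
        body = PREFIX_RE.sub('', raw.rstrip('\n'))
        if pending is not None:
            pending.append(body)
            if body == '>':        # closes the output=< ... > block
                yield ' '.join(pending)
                pending = None
        elif body.endswith('output=<'):
            pending = [body]
        else:
            yield body
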
Need to start a new one" pod="openstack/glance-ecb3-account-create-update-72l64" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.356646 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d604dc72-ca2a-4ef5-bed1-07a12ce10183" path="/var/lib/kubelet/pods/d604dc72-ca2a-4ef5-bed1-07a12ce10183/volumes" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.426799 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.442318 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-l9d6w" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.694140 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-5ll9n-config-87jr4"] Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.695228 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.697651 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.701794 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5ll9n-config-87jr4"] Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.845830 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-h6rcw"] Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.883884 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-run-ovn\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.883944 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-scripts\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.883996 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-additional-scripts\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.884027 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-log-ovn\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.884092 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-run\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " 
pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.884120 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76wnd\" (UniqueName: \"kubernetes.io/projected/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-kube-api-access-76wnd\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.928926 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-ecb3-account-create-update-72l64"] Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.985620 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-log-ovn\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.985941 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-run\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.985962 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-log-ovn\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.985973 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76wnd\" (UniqueName: \"kubernetes.io/projected/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-kube-api-access-76wnd\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.986382 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-run\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.986436 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-run-ovn\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.986477 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-scripts\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.986521 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" 
(UniqueName: \"kubernetes.io/configmap/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-additional-scripts\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.986572 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-run-ovn\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.987247 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-additional-scripts\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:14 crc kubenswrapper[4814]: I0122 05:35:14.988400 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-scripts\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.009804 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76wnd\" (UniqueName: \"kubernetes.io/projected/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-kube-api-access-76wnd\") pod \"ovn-controller-5ll9n-config-87jr4\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.027070 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.539135 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5ll9n-config-87jr4"] Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.615608 4814 generic.go:334] "Generic (PLEG): container finished" podID="508ac5a0-f0fa-4e7c-bb63-1fac194d7545" containerID="676df7333b92940ecaed8508f407a997766a031d45a76178f1feea9c4fbc2141" exitCode=0 Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.615803 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h6rcw" event={"ID":"508ac5a0-f0fa-4e7c-bb63-1fac194d7545","Type":"ContainerDied","Data":"676df7333b92940ecaed8508f407a997766a031d45a76178f1feea9c4fbc2141"} Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.615870 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h6rcw" event={"ID":"508ac5a0-f0fa-4e7c-bb63-1fac194d7545","Type":"ContainerStarted","Data":"253210bab4bc680d08f9e5abe37a41ebd24a6aa40f5d00a3e6ce5d35504c2f40"} Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.623077 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5ll9n-config-87jr4" event={"ID":"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02","Type":"ContainerStarted","Data":"79710838e4be8add13bfe179e15375d5234aab81ea8f479807300026ed17e445"} Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.626409 4814 generic.go:334] "Generic (PLEG): container finished" podID="ec22ee17-7d0d-45fe-9059-0d8f059ee212" containerID="cff5bfa1fc1c89ade652bfadd0539603e46ff841066b9f5603b7c8b2790bd51e" exitCode=0 Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.626479 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ecb3-account-create-update-72l64" event={"ID":"ec22ee17-7d0d-45fe-9059-0d8f059ee212","Type":"ContainerDied","Data":"cff5bfa1fc1c89ade652bfadd0539603e46ff841066b9f5603b7c8b2790bd51e"} Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.626865 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ecb3-account-create-update-72l64" event={"ID":"ec22ee17-7d0d-45fe-9059-0d8f059ee212","Type":"ContainerStarted","Data":"e984d63254315f5822b01f36b4d1e590d1b8605ae529df98d7008002b8dd608e"} Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.703555 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-8f4xw"] Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.704577 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-8f4xw" Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.708134 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.718602 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-8f4xw"] Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.901098 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdf4f08a-1961-42d8-9130-2411f107eb78-operator-scripts\") pod \"root-account-create-update-8f4xw\" (UID: \"cdf4f08a-1961-42d8-9130-2411f107eb78\") " pod="openstack/root-account-create-update-8f4xw" Jan 22 05:35:15 crc kubenswrapper[4814]: I0122 05:35:15.901394 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzwgl\" (UniqueName: \"kubernetes.io/projected/cdf4f08a-1961-42d8-9130-2411f107eb78-kube-api-access-nzwgl\") pod \"root-account-create-update-8f4xw\" (UID: \"cdf4f08a-1961-42d8-9130-2411f107eb78\") " pod="openstack/root-account-create-update-8f4xw" Jan 22 05:35:16 crc kubenswrapper[4814]: I0122 05:35:16.005039 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdf4f08a-1961-42d8-9130-2411f107eb78-operator-scripts\") pod \"root-account-create-update-8f4xw\" (UID: \"cdf4f08a-1961-42d8-9130-2411f107eb78\") " pod="openstack/root-account-create-update-8f4xw" Jan 22 05:35:16 crc kubenswrapper[4814]: I0122 05:35:16.005123 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzwgl\" (UniqueName: \"kubernetes.io/projected/cdf4f08a-1961-42d8-9130-2411f107eb78-kube-api-access-nzwgl\") pod \"root-account-create-update-8f4xw\" (UID: \"cdf4f08a-1961-42d8-9130-2411f107eb78\") " pod="openstack/root-account-create-update-8f4xw" Jan 22 05:35:16 crc kubenswrapper[4814]: I0122 05:35:16.014455 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdf4f08a-1961-42d8-9130-2411f107eb78-operator-scripts\") pod \"root-account-create-update-8f4xw\" (UID: \"cdf4f08a-1961-42d8-9130-2411f107eb78\") " pod="openstack/root-account-create-update-8f4xw" Jan 22 05:35:16 crc kubenswrapper[4814]: I0122 05:35:16.040528 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzwgl\" (UniqueName: \"kubernetes.io/projected/cdf4f08a-1961-42d8-9130-2411f107eb78-kube-api-access-nzwgl\") pod \"root-account-create-update-8f4xw\" (UID: \"cdf4f08a-1961-42d8-9130-2411f107eb78\") " pod="openstack/root-account-create-update-8f4xw" Jan 22 05:35:16 crc kubenswrapper[4814]: I0122 05:35:16.324356 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-8f4xw" Jan 22 05:35:16 crc kubenswrapper[4814]: I0122 05:35:16.635540 4814 generic.go:334] "Generic (PLEG): container finished" podID="9ae1b7cd-6860-45e1-9ba3-e3f87446cd02" containerID="78e6f2bcae629b7dc179ad9cc64009fba1bf19de277d94dd338394c5884102c6" exitCode=0 Jan 22 05:35:16 crc kubenswrapper[4814]: I0122 05:35:16.635586 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5ll9n-config-87jr4" event={"ID":"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02","Type":"ContainerDied","Data":"78e6f2bcae629b7dc179ad9cc64009fba1bf19de277d94dd338394c5884102c6"} Jan 22 05:35:16 crc kubenswrapper[4814]: I0122 05:35:16.637378 4814 generic.go:334] "Generic (PLEG): container finished" podID="657e06e5-a5ca-4104-bc6c-12c31d9a1984" containerID="c09342ec01b5ffc2a1e34a782fc4a5dd16705b2dbf951d681f6d60c33755fe2c" exitCode=0 Jan 22 05:35:16 crc kubenswrapper[4814]: I0122 05:35:16.637470 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xhppx" event={"ID":"657e06e5-a5ca-4104-bc6c-12c31d9a1984","Type":"ContainerDied","Data":"c09342ec01b5ffc2a1e34a782fc4a5dd16705b2dbf951d681f6d60c33755fe2c"} Jan 22 05:35:16 crc kubenswrapper[4814]: I0122 05:35:16.783144 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-8f4xw"] Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.055450 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-ecb3-account-create-update-72l64" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.059331 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-h6rcw" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.221179 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/508ac5a0-f0fa-4e7c-bb63-1fac194d7545-operator-scripts\") pod \"508ac5a0-f0fa-4e7c-bb63-1fac194d7545\" (UID: \"508ac5a0-f0fa-4e7c-bb63-1fac194d7545\") " Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.221251 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec22ee17-7d0d-45fe-9059-0d8f059ee212-operator-scripts\") pod \"ec22ee17-7d0d-45fe-9059-0d8f059ee212\" (UID: \"ec22ee17-7d0d-45fe-9059-0d8f059ee212\") " Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.221280 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrfsq\" (UniqueName: \"kubernetes.io/projected/508ac5a0-f0fa-4e7c-bb63-1fac194d7545-kube-api-access-jrfsq\") pod \"508ac5a0-f0fa-4e7c-bb63-1fac194d7545\" (UID: \"508ac5a0-f0fa-4e7c-bb63-1fac194d7545\") " Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.221422 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v9pxd\" (UniqueName: \"kubernetes.io/projected/ec22ee17-7d0d-45fe-9059-0d8f059ee212-kube-api-access-v9pxd\") pod \"ec22ee17-7d0d-45fe-9059-0d8f059ee212\" (UID: \"ec22ee17-7d0d-45fe-9059-0d8f059ee212\") " Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.222575 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/508ac5a0-f0fa-4e7c-bb63-1fac194d7545-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "508ac5a0-f0fa-4e7c-bb63-1fac194d7545" (UID: 
"508ac5a0-f0fa-4e7c-bb63-1fac194d7545"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.222907 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec22ee17-7d0d-45fe-9059-0d8f059ee212-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ec22ee17-7d0d-45fe-9059-0d8f059ee212" (UID: "ec22ee17-7d0d-45fe-9059-0d8f059ee212"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.227014 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/508ac5a0-f0fa-4e7c-bb63-1fac194d7545-kube-api-access-jrfsq" (OuterVolumeSpecName: "kube-api-access-jrfsq") pod "508ac5a0-f0fa-4e7c-bb63-1fac194d7545" (UID: "508ac5a0-f0fa-4e7c-bb63-1fac194d7545"). InnerVolumeSpecName "kube-api-access-jrfsq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.230752 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec22ee17-7d0d-45fe-9059-0d8f059ee212-kube-api-access-v9pxd" (OuterVolumeSpecName: "kube-api-access-v9pxd") pod "ec22ee17-7d0d-45fe-9059-0d8f059ee212" (UID: "ec22ee17-7d0d-45fe-9059-0d8f059ee212"). InnerVolumeSpecName "kube-api-access-v9pxd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.322920 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v9pxd\" (UniqueName: \"kubernetes.io/projected/ec22ee17-7d0d-45fe-9059-0d8f059ee212-kube-api-access-v9pxd\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.322959 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/508ac5a0-f0fa-4e7c-bb63-1fac194d7545-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.322969 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ec22ee17-7d0d-45fe-9059-0d8f059ee212-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.322978 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrfsq\" (UniqueName: \"kubernetes.io/projected/508ac5a0-f0fa-4e7c-bb63-1fac194d7545-kube-api-access-jrfsq\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.652926 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-h6rcw" event={"ID":"508ac5a0-f0fa-4e7c-bb63-1fac194d7545","Type":"ContainerDied","Data":"253210bab4bc680d08f9e5abe37a41ebd24a6aa40f5d00a3e6ce5d35504c2f40"} Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.652974 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="253210bab4bc680d08f9e5abe37a41ebd24a6aa40f5d00a3e6ce5d35504c2f40" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.653286 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-h6rcw" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.655167 4814 generic.go:334] "Generic (PLEG): container finished" podID="cdf4f08a-1961-42d8-9130-2411f107eb78" containerID="db183127b8c5203be8cd0eb630502e22866d8a01d857e36bccf6c1af69f3b78f" exitCode=0 Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.655306 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8f4xw" event={"ID":"cdf4f08a-1961-42d8-9130-2411f107eb78","Type":"ContainerDied","Data":"db183127b8c5203be8cd0eb630502e22866d8a01d857e36bccf6c1af69f3b78f"} Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.655365 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8f4xw" event={"ID":"cdf4f08a-1961-42d8-9130-2411f107eb78","Type":"ContainerStarted","Data":"c70b3b464b4db7f082dce4ea487a8235b652639f49afd6842f5194cd03191b76"} Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.657282 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-ecb3-account-create-update-72l64" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.657287 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-ecb3-account-create-update-72l64" event={"ID":"ec22ee17-7d0d-45fe-9059-0d8f059ee212","Type":"ContainerDied","Data":"e984d63254315f5822b01f36b4d1e590d1b8605ae529df98d7008002b8dd608e"} Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.657338 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e984d63254315f5822b01f36b4d1e590d1b8605ae529df98d7008002b8dd608e" Jan 22 05:35:17 crc kubenswrapper[4814]: I0122 05:35:17.982132 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.127501 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.135736 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-log-ovn\") pod \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.135823 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-run-ovn\") pod \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.135858 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-run\") pod \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.135895 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-scripts\") pod \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.135901 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "9ae1b7cd-6860-45e1-9ba3-e3f87446cd02" (UID: "9ae1b7cd-6860-45e1-9ba3-e3f87446cd02"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.135921 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "9ae1b7cd-6860-45e1-9ba3-e3f87446cd02" (UID: "9ae1b7cd-6860-45e1-9ba3-e3f87446cd02"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.135947 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-run" (OuterVolumeSpecName: "var-run") pod "9ae1b7cd-6860-45e1-9ba3-e3f87446cd02" (UID: "9ae1b7cd-6860-45e1-9ba3-e3f87446cd02"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.136001 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-additional-scripts\") pod \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.136061 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76wnd\" (UniqueName: \"kubernetes.io/projected/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-kube-api-access-76wnd\") pod \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\" (UID: \"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02\") " Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.136379 4814 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.136390 4814 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.136397 4814 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-var-run\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.136676 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "9ae1b7cd-6860-45e1-9ba3-e3f87446cd02" (UID: "9ae1b7cd-6860-45e1-9ba3-e3f87446cd02"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.136913 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-scripts" (OuterVolumeSpecName: "scripts") pod "9ae1b7cd-6860-45e1-9ba3-e3f87446cd02" (UID: "9ae1b7cd-6860-45e1-9ba3-e3f87446cd02"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.139815 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-kube-api-access-76wnd" (OuterVolumeSpecName: "kube-api-access-76wnd") pod "9ae1b7cd-6860-45e1-9ba3-e3f87446cd02" (UID: "9ae1b7cd-6860-45e1-9ba3-e3f87446cd02"). InnerVolumeSpecName "kube-api-access-76wnd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.237116 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/657e06e5-a5ca-4104-bc6c-12c31d9a1984-ring-data-devices\") pod \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.237173 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-dispersionconf\") pod \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.237244 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/657e06e5-a5ca-4104-bc6c-12c31d9a1984-etc-swift\") pod \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.237270 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-combined-ca-bundle\") pod \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.237321 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/657e06e5-a5ca-4104-bc6c-12c31d9a1984-scripts\") pod \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.237338 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-swiftconf\") pod \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.237364 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfqfm\" (UniqueName: \"kubernetes.io/projected/657e06e5-a5ca-4104-bc6c-12c31d9a1984-kube-api-access-zfqfm\") pod \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\" (UID: \"657e06e5-a5ca-4104-bc6c-12c31d9a1984\") " Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.237696 4814 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.237711 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76wnd\" (UniqueName: \"kubernetes.io/projected/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-kube-api-access-76wnd\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.237727 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.238198 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/657e06e5-a5ca-4104-bc6c-12c31d9a1984-ring-data-devices" 
(OuterVolumeSpecName: "ring-data-devices") pod "657e06e5-a5ca-4104-bc6c-12c31d9a1984" (UID: "657e06e5-a5ca-4104-bc6c-12c31d9a1984"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.238210 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/657e06e5-a5ca-4104-bc6c-12c31d9a1984-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "657e06e5-a5ca-4104-bc6c-12c31d9a1984" (UID: "657e06e5-a5ca-4104-bc6c-12c31d9a1984"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.242942 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/657e06e5-a5ca-4104-bc6c-12c31d9a1984-kube-api-access-zfqfm" (OuterVolumeSpecName: "kube-api-access-zfqfm") pod "657e06e5-a5ca-4104-bc6c-12c31d9a1984" (UID: "657e06e5-a5ca-4104-bc6c-12c31d9a1984"). InnerVolumeSpecName "kube-api-access-zfqfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.243429 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "657e06e5-a5ca-4104-bc6c-12c31d9a1984" (UID: "657e06e5-a5ca-4104-bc6c-12c31d9a1984"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.255338 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/657e06e5-a5ca-4104-bc6c-12c31d9a1984-scripts" (OuterVolumeSpecName: "scripts") pod "657e06e5-a5ca-4104-bc6c-12c31d9a1984" (UID: "657e06e5-a5ca-4104-bc6c-12c31d9a1984"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.256467 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "657e06e5-a5ca-4104-bc6c-12c31d9a1984" (UID: "657e06e5-a5ca-4104-bc6c-12c31d9a1984"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.263313 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "657e06e5-a5ca-4104-bc6c-12c31d9a1984" (UID: "657e06e5-a5ca-4104-bc6c-12c31d9a1984"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.339654 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/657e06e5-a5ca-4104-bc6c-12c31d9a1984-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.339684 4814 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.339696 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfqfm\" (UniqueName: \"kubernetes.io/projected/657e06e5-a5ca-4104-bc6c-12c31d9a1984-kube-api-access-zfqfm\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.339707 4814 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/657e06e5-a5ca-4104-bc6c-12c31d9a1984-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.339716 4814 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.339726 4814 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/657e06e5-a5ca-4104-bc6c-12c31d9a1984-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.339735 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/657e06e5-a5ca-4104-bc6c-12c31d9a1984-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.441532 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.462501 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e7eb7182-d869-4625-99e2-6abc75aee22d-etc-swift\") pod \"swift-storage-0\" (UID: \"e7eb7182-d869-4625-99e2-6abc75aee22d\") " pod="openstack/swift-storage-0" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.476047 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.667232 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5ll9n-config-87jr4" event={"ID":"9ae1b7cd-6860-45e1-9ba3-e3f87446cd02","Type":"ContainerDied","Data":"79710838e4be8add13bfe179e15375d5234aab81ea8f479807300026ed17e445"} Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.667272 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79710838e4be8add13bfe179e15375d5234aab81ea8f479807300026ed17e445" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.667341 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5ll9n-config-87jr4" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.669015 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-xhppx" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.669378 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-xhppx" event={"ID":"657e06e5-a5ca-4104-bc6c-12c31d9a1984","Type":"ContainerDied","Data":"8dcf0fef3925eb88ba426da17108d5652ad42f7bd550249ee8feaf4666947d91"} Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.669444 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8dcf0fef3925eb88ba426da17108d5652ad42f7bd550249ee8feaf4666947d91" Jan 22 05:35:18 crc kubenswrapper[4814]: I0122 05:35:18.996304 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-8f4xw" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.042329 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.106863 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-5ll9n-config-87jr4"] Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.107246 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-5ll9n-config-87jr4"] Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.158094 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdf4f08a-1961-42d8-9130-2411f107eb78-operator-scripts\") pod \"cdf4f08a-1961-42d8-9130-2411f107eb78\" (UID: \"cdf4f08a-1961-42d8-9130-2411f107eb78\") " Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.159323 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwgl\" (UniqueName: \"kubernetes.io/projected/cdf4f08a-1961-42d8-9130-2411f107eb78-kube-api-access-nzwgl\") pod \"cdf4f08a-1961-42d8-9130-2411f107eb78\" (UID: \"cdf4f08a-1961-42d8-9130-2411f107eb78\") " Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.159507 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdf4f08a-1961-42d8-9130-2411f107eb78-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cdf4f08a-1961-42d8-9130-2411f107eb78" (UID: "cdf4f08a-1961-42d8-9130-2411f107eb78"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.160146 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdf4f08a-1961-42d8-9130-2411f107eb78-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.165726 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdf4f08a-1961-42d8-9130-2411f107eb78-kube-api-access-nzwgl" (OuterVolumeSpecName: "kube-api-access-nzwgl") pod "cdf4f08a-1961-42d8-9130-2411f107eb78" (UID: "cdf4f08a-1961-42d8-9130-2411f107eb78"). InnerVolumeSpecName "kube-api-access-nzwgl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.196400 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-5ll9n" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.264719 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwgl\" (UniqueName: \"kubernetes.io/projected/cdf4f08a-1961-42d8-9130-2411f107eb78-kube-api-access-nzwgl\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.276126 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-st6qn"] Jan 22 05:35:19 crc kubenswrapper[4814]: E0122 05:35:19.276404 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="508ac5a0-f0fa-4e7c-bb63-1fac194d7545" containerName="mariadb-database-create" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.276420 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="508ac5a0-f0fa-4e7c-bb63-1fac194d7545" containerName="mariadb-database-create" Jan 22 05:35:19 crc kubenswrapper[4814]: E0122 05:35:19.276438 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="657e06e5-a5ca-4104-bc6c-12c31d9a1984" containerName="swift-ring-rebalance" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.276444 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="657e06e5-a5ca-4104-bc6c-12c31d9a1984" containerName="swift-ring-rebalance" Jan 22 05:35:19 crc kubenswrapper[4814]: E0122 05:35:19.276455 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ae1b7cd-6860-45e1-9ba3-e3f87446cd02" containerName="ovn-config" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.276461 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ae1b7cd-6860-45e1-9ba3-e3f87446cd02" containerName="ovn-config" Jan 22 05:35:19 crc kubenswrapper[4814]: E0122 05:35:19.276479 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdf4f08a-1961-42d8-9130-2411f107eb78" containerName="mariadb-account-create-update" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.276485 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdf4f08a-1961-42d8-9130-2411f107eb78" containerName="mariadb-account-create-update" Jan 22 05:35:19 crc kubenswrapper[4814]: E0122 05:35:19.276499 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec22ee17-7d0d-45fe-9059-0d8f059ee212" containerName="mariadb-account-create-update" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.276505 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec22ee17-7d0d-45fe-9059-0d8f059ee212" containerName="mariadb-account-create-update" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.276656 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ae1b7cd-6860-45e1-9ba3-e3f87446cd02" containerName="ovn-config" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.276671 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="657e06e5-a5ca-4104-bc6c-12c31d9a1984" containerName="swift-ring-rebalance" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.276679 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdf4f08a-1961-42d8-9130-2411f107eb78" containerName="mariadb-account-create-update" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.276688 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec22ee17-7d0d-45fe-9059-0d8f059ee212" 
containerName="mariadb-account-create-update" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.276697 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="508ac5a0-f0fa-4e7c-bb63-1fac194d7545" containerName="mariadb-database-create" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.277173 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.279874 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-jnz8l" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.279935 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.300417 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-st6qn"] Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.366170 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-db-sync-config-data\") pod \"glance-db-sync-st6qn\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.366397 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-combined-ca-bundle\") pod \"glance-db-sync-st6qn\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.366439 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlnl6\" (UniqueName: \"kubernetes.io/projected/65c32786-ef8f-4498-aaef-4ec1dcebc57d-kube-api-access-mlnl6\") pod \"glance-db-sync-st6qn\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.366487 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-config-data\") pod \"glance-db-sync-st6qn\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.467950 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-combined-ca-bundle\") pod \"glance-db-sync-st6qn\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.468008 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlnl6\" (UniqueName: \"kubernetes.io/projected/65c32786-ef8f-4498-aaef-4ec1dcebc57d-kube-api-access-mlnl6\") pod \"glance-db-sync-st6qn\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.468043 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-config-data\") pod \"glance-db-sync-st6qn\" 
(UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.468124 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-db-sync-config-data\") pod \"glance-db-sync-st6qn\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.473317 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-config-data\") pod \"glance-db-sync-st6qn\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.473717 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-combined-ca-bundle\") pod \"glance-db-sync-st6qn\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.483062 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-db-sync-config-data\") pod \"glance-db-sync-st6qn\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.489968 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlnl6\" (UniqueName: \"kubernetes.io/projected/65c32786-ef8f-4498-aaef-4ec1dcebc57d-kube-api-access-mlnl6\") pod \"glance-db-sync-st6qn\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.597060 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.678355 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-8f4xw" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.678808 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8f4xw" event={"ID":"cdf4f08a-1961-42d8-9130-2411f107eb78","Type":"ContainerDied","Data":"c70b3b464b4db7f082dce4ea487a8235b652639f49afd6842f5194cd03191b76"} Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.678857 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c70b3b464b4db7f082dce4ea487a8235b652639f49afd6842f5194cd03191b76" Jan 22 05:35:19 crc kubenswrapper[4814]: I0122 05:35:19.680041 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"05af6d3f96024df68a47c9bccc8f1231fdd948eee8dc6ee84e81aef93fba891a"} Jan 22 05:35:20 crc kubenswrapper[4814]: I0122 05:35:20.329030 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-st6qn"] Jan 22 05:35:20 crc kubenswrapper[4814]: W0122 05:35:20.334165 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65c32786_ef8f_4498_aaef_4ec1dcebc57d.slice/crio-1a831592faed72613c9ad4cb5cb5d8de16350987d6a9ca28338deffd776134f1 WatchSource:0}: Error finding container 1a831592faed72613c9ad4cb5cb5d8de16350987d6a9ca28338deffd776134f1: Status 404 returned error can't find the container with id 1a831592faed72613c9ad4cb5cb5d8de16350987d6a9ca28338deffd776134f1 Jan 22 05:35:20 crc kubenswrapper[4814]: I0122 05:35:20.376918 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ae1b7cd-6860-45e1-9ba3-e3f87446cd02" path="/var/lib/kubelet/pods/9ae1b7cd-6860-45e1-9ba3-e3f87446cd02/volumes" Jan 22 05:35:20 crc kubenswrapper[4814]: I0122 05:35:20.687367 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-st6qn" event={"ID":"65c32786-ef8f-4498-aaef-4ec1dcebc57d","Type":"ContainerStarted","Data":"1a831592faed72613c9ad4cb5cb5d8de16350987d6a9ca28338deffd776134f1"} Jan 22 05:35:20 crc kubenswrapper[4814]: I0122 05:35:20.689234 4814 generic.go:334] "Generic (PLEG): container finished" podID="22c14c36-2eb5-424d-a919-25f2e99eeb44" containerID="a859b6b6c8244733d3bfd805c35ab89852ef9de42452cfa756a80e1942fce6bc" exitCode=0 Jan 22 05:35:20 crc kubenswrapper[4814]: I0122 05:35:20.689276 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"22c14c36-2eb5-424d-a919-25f2e99eeb44","Type":"ContainerDied","Data":"a859b6b6c8244733d3bfd805c35ab89852ef9de42452cfa756a80e1942fce6bc"} Jan 22 05:35:20 crc kubenswrapper[4814]: I0122 05:35:20.692433 4814 generic.go:334] "Generic (PLEG): container finished" podID="14a83f70-2b64-417d-a198-d51bb829cea1" containerID="ba70de8318a9434d35facf4c16d1dbd28bb5e77cdb81af5035192ac0f65f5894" exitCode=0 Jan 22 05:35:20 crc kubenswrapper[4814]: I0122 05:35:20.692481 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"14a83f70-2b64-417d-a198-d51bb829cea1","Type":"ContainerDied","Data":"ba70de8318a9434d35facf4c16d1dbd28bb5e77cdb81af5035192ac0f65f5894"} Jan 22 05:35:21 crc kubenswrapper[4814]: I0122 05:35:21.719024 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"08241b0a1d6e658d65c14f2b57a2d4782df0f4c6071bbbfc6bba828afc15b1fb"} Jan 22 05:35:21 crc kubenswrapper[4814]: I0122 05:35:21.719069 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"da8b8bf8c6dc6c1dad35dc62198f0a8544668f365a6c58ebdaf8fea179f03407"} Jan 22 05:35:21 crc kubenswrapper[4814]: I0122 05:35:21.719078 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"ca1932bba649c64f44b01784c3de37c06741d7eb644029af835ad4661f37459c"} Jan 22 05:35:21 crc kubenswrapper[4814]: I0122 05:35:21.719086 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"9a95f73669ba56ec69a9a388037caa2d071968aaf4a77bf7c1bc5161d8542741"} Jan 22 05:35:21 crc kubenswrapper[4814]: I0122 05:35:21.730748 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"22c14c36-2eb5-424d-a919-25f2e99eeb44","Type":"ContainerStarted","Data":"08cdcd6761743637a632ae626175a130c736560dbfb3feee844b790b598e0fb5"} Jan 22 05:35:21 crc kubenswrapper[4814]: I0122 05:35:21.731768 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 22 05:35:21 crc kubenswrapper[4814]: I0122 05:35:21.744610 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"14a83f70-2b64-417d-a198-d51bb829cea1","Type":"ContainerStarted","Data":"97f257806b7f3681374dbe001c01a6167cab18ac3f035513fa06dd0adb750034"} Jan 22 05:35:21 crc kubenswrapper[4814]: I0122 05:35:21.744811 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:35:21 crc kubenswrapper[4814]: I0122 05:35:21.756459 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.467894771 podStartE2EDuration="1m17.756447842s" podCreationTimestamp="2026-01-22 05:34:04 +0000 UTC" firstStartedPulling="2026-01-22 05:34:06.492548066 +0000 UTC m=+932.576036281" lastFinishedPulling="2026-01-22 05:34:47.781101097 +0000 UTC m=+973.864589352" observedRunningTime="2026-01-22 05:35:21.75477591 +0000 UTC m=+1007.838264125" watchObservedRunningTime="2026-01-22 05:35:21.756447842 +0000 UTC m=+1007.839936057" Jan 22 05:35:21 crc kubenswrapper[4814]: I0122 05:35:21.787253 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.62315131 podStartE2EDuration="1m17.787239827s" podCreationTimestamp="2026-01-22 05:34:04 +0000 UTC" firstStartedPulling="2026-01-22 05:34:06.614396429 +0000 UTC m=+932.697884644" lastFinishedPulling="2026-01-22 05:34:47.778484936 +0000 UTC m=+973.861973161" observedRunningTime="2026-01-22 05:35:21.782495999 +0000 UTC m=+1007.865984214" watchObservedRunningTime="2026-01-22 05:35:21.787239827 +0000 UTC m=+1007.870728042" Jan 22 05:35:22 crc kubenswrapper[4814]: I0122 05:35:22.253130 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-8f4xw"] Jan 22 05:35:22 crc kubenswrapper[4814]: I0122 05:35:22.258297 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/root-account-create-update-8f4xw"] Jan 22 05:35:22 crc kubenswrapper[4814]: I0122 05:35:22.353016 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdf4f08a-1961-42d8-9130-2411f107eb78" path="/var/lib/kubelet/pods/cdf4f08a-1961-42d8-9130-2411f107eb78/volumes" Jan 22 05:35:24 crc kubenswrapper[4814]: I0122 05:35:24.769321 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"d496fa13b3434fc4492f194ca3a62ac68665fe51a72d835097f731afee166f1d"} Jan 22 05:35:24 crc kubenswrapper[4814]: I0122 05:35:24.769807 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"eb42cc81b635989e2e0ad7022f2d07ba46e5bf9dee05c3c1e4ec4353ceebe743"} Jan 22 05:35:24 crc kubenswrapper[4814]: I0122 05:35:24.769818 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"1315ac4793a45eb7db810ec022c2e43751f959ab03ac2d2f4a018e8adb464315"} Jan 22 05:35:25 crc kubenswrapper[4814]: I0122 05:35:25.779466 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"95df13536f02cf2345e5eed7daab80fa5748a9d209b698723d96e2feee622cbb"} Jan 22 05:35:26 crc kubenswrapper[4814]: I0122 05:35:26.829025 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"5c68b782399ba850e56e5f46f41b8a67ff69f0000a202df6df0f26315e6c32e2"} Jan 22 05:35:26 crc kubenswrapper[4814]: I0122 05:35:26.829928 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"816bb3872996f16f31710a8818ced8855c5b5b68adb0ca098515719211417985"} Jan 22 05:35:26 crc kubenswrapper[4814]: I0122 05:35:26.830015 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"6f4c4b9b748b027568d78413352ba1dc5dd725ea04f57548744dd3707566b28a"} Jan 22 05:35:27 crc kubenswrapper[4814]: I0122 05:35:27.279201 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-bl945"] Jan 22 05:35:27 crc kubenswrapper[4814]: I0122 05:35:27.280303 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-bl945" Jan 22 05:35:27 crc kubenswrapper[4814]: I0122 05:35:27.286693 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 22 05:35:27 crc kubenswrapper[4814]: I0122 05:35:27.295414 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-bl945"] Jan 22 05:35:27 crc kubenswrapper[4814]: I0122 05:35:27.390477 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc4gx\" (UniqueName: \"kubernetes.io/projected/729bdb41-4e49-4df6-a581-87fb0db6f3a0-kube-api-access-vc4gx\") pod \"root-account-create-update-bl945\" (UID: \"729bdb41-4e49-4df6-a581-87fb0db6f3a0\") " pod="openstack/root-account-create-update-bl945" Jan 22 05:35:27 crc kubenswrapper[4814]: I0122 05:35:27.390552 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/729bdb41-4e49-4df6-a581-87fb0db6f3a0-operator-scripts\") pod \"root-account-create-update-bl945\" (UID: \"729bdb41-4e49-4df6-a581-87fb0db6f3a0\") " pod="openstack/root-account-create-update-bl945" Jan 22 05:35:27 crc kubenswrapper[4814]: I0122 05:35:27.492444 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc4gx\" (UniqueName: \"kubernetes.io/projected/729bdb41-4e49-4df6-a581-87fb0db6f3a0-kube-api-access-vc4gx\") pod \"root-account-create-update-bl945\" (UID: \"729bdb41-4e49-4df6-a581-87fb0db6f3a0\") " pod="openstack/root-account-create-update-bl945" Jan 22 05:35:27 crc kubenswrapper[4814]: I0122 05:35:27.492507 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/729bdb41-4e49-4df6-a581-87fb0db6f3a0-operator-scripts\") pod \"root-account-create-update-bl945\" (UID: \"729bdb41-4e49-4df6-a581-87fb0db6f3a0\") " pod="openstack/root-account-create-update-bl945" Jan 22 05:35:27 crc kubenswrapper[4814]: I0122 05:35:27.493181 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/729bdb41-4e49-4df6-a581-87fb0db6f3a0-operator-scripts\") pod \"root-account-create-update-bl945\" (UID: \"729bdb41-4e49-4df6-a581-87fb0db6f3a0\") " pod="openstack/root-account-create-update-bl945" Jan 22 05:35:27 crc kubenswrapper[4814]: I0122 05:35:27.517547 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc4gx\" (UniqueName: \"kubernetes.io/projected/729bdb41-4e49-4df6-a581-87fb0db6f3a0-kube-api-access-vc4gx\") pod \"root-account-create-update-bl945\" (UID: \"729bdb41-4e49-4df6-a581-87fb0db6f3a0\") " pod="openstack/root-account-create-update-bl945" Jan 22 05:35:27 crc kubenswrapper[4814]: I0122 05:35:27.648500 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-bl945" Jan 22 05:35:27 crc kubenswrapper[4814]: I0122 05:35:27.840259 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"f29d7d4444dc0b3f55b10116e2b238f9169afed55f027f2dd5ef4b103bcc1da4"} Jan 22 05:35:27 crc kubenswrapper[4814]: I0122 05:35:27.840328 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"ca74a081d22b652e2087301c6deb507f921a6c05d8dcca53d81a45247dd0056a"} Jan 22 05:35:35 crc kubenswrapper[4814]: I0122 05:35:35.599275 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-bl945"] Jan 22 05:35:35 crc kubenswrapper[4814]: I0122 05:35:35.607035 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 22 05:35:35 crc kubenswrapper[4814]: I0122 05:35:35.912487 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-st6qn" event={"ID":"65c32786-ef8f-4498-aaef-4ec1dcebc57d","Type":"ContainerStarted","Data":"15ffb9cdfb3e5de75db0fc8522561149f16fc5f39a57f4270f3be516a8641af3"} Jan 22 05:35:35 crc kubenswrapper[4814]: I0122 05:35:35.913979 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 22 05:35:35 crc kubenswrapper[4814]: I0122 05:35:35.919132 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"07eacdf33363dd7f047591187292e8e134c4787eab1c8323da07bf66def16ea2"} Jan 22 05:35:35 crc kubenswrapper[4814]: I0122 05:35:35.919366 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"e7eb7182-d869-4625-99e2-6abc75aee22d","Type":"ContainerStarted","Data":"001665e7c8e1ac05a58521a9e37f1274d22a45a25d5395a73072880d0b87e938"} Jan 22 05:35:35 crc kubenswrapper[4814]: I0122 05:35:35.921815 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-bl945" event={"ID":"729bdb41-4e49-4df6-a581-87fb0db6f3a0","Type":"ContainerStarted","Data":"8d2af743a7aca9ff498254942665cc91da4e8afba5629eec624cca4256c2d9c2"} Jan 22 05:35:35 crc kubenswrapper[4814]: I0122 05:35:35.921904 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-bl945" event={"ID":"729bdb41-4e49-4df6-a581-87fb0db6f3a0","Type":"ContainerStarted","Data":"eefd9a3111173efb54c78b79c94065f8f3d0fcae9fc0bfc59869840fa82d451a"} Jan 22 05:35:35 crc kubenswrapper[4814]: I0122 05:35:35.931823 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-st6qn" podStartSLOduration=2.015569021 podStartE2EDuration="16.931800807s" podCreationTimestamp="2026-01-22 05:35:19 +0000 UTC" firstStartedPulling="2026-01-22 05:35:20.365136824 +0000 UTC m=+1006.448625039" lastFinishedPulling="2026-01-22 05:35:35.28136861 +0000 UTC m=+1021.364856825" observedRunningTime="2026-01-22 05:35:35.930437425 +0000 UTC m=+1022.013925640" watchObservedRunningTime="2026-01-22 05:35:35.931800807 +0000 UTC m=+1022.015289022" Jan 22 05:35:35 crc kubenswrapper[4814]: I0122 05:35:35.946353 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-bl945" 
podStartSLOduration=8.946334598 podStartE2EDuration="8.946334598s" podCreationTimestamp="2026-01-22 05:35:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:35:35.945183162 +0000 UTC m=+1022.028671377" watchObservedRunningTime="2026-01-22 05:35:35.946334598 +0000 UTC m=+1022.029822813" Jan 22 05:35:35 crc kubenswrapper[4814]: I0122 05:35:35.998545 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=27.834401843 podStartE2EDuration="34.998529725s" podCreationTimestamp="2026-01-22 05:35:01 +0000 UTC" firstStartedPulling="2026-01-22 05:35:19.060591746 +0000 UTC m=+1005.144079961" lastFinishedPulling="2026-01-22 05:35:26.224719618 +0000 UTC m=+1012.308207843" observedRunningTime="2026-01-22 05:35:35.996491272 +0000 UTC m=+1022.079979487" watchObservedRunningTime="2026-01-22 05:35:35.998529725 +0000 UTC m=+1022.082017940" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.045806 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.555941 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-vb7mm"] Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.556790 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-vb7mm" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.630341 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-6rwmc"] Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.631537 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.637710 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.646605 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-vb7mm"] Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.667388 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-6rwmc"] Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.747266 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-config\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.747300 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sfft\" (UniqueName: \"kubernetes.io/projected/c71918d0-b384-4bf5-b8ee-a338ff72d9e9-kube-api-access-4sfft\") pod \"heat-db-create-vb7mm\" (UID: \"c71918d0-b384-4bf5-b8ee-a338ff72d9e9\") " pod="openstack/heat-db-create-vb7mm" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.747351 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.747378 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.747422 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp72b\" (UniqueName: \"kubernetes.io/projected/02fc56a5-86a0-4983-8219-0c0f4f220b7b-kube-api-access-tp72b\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.747439 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.747468 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.747483 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c71918d0-b384-4bf5-b8ee-a338ff72d9e9-operator-scripts\") pod \"heat-db-create-vb7mm\" (UID: \"c71918d0-b384-4bf5-b8ee-a338ff72d9e9\") " pod="openstack/heat-db-create-vb7mm" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.799149 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-4d21-account-create-update-vf2t4"] Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.805385 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-4d21-account-create-update-vf2t4" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.814216 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.817271 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-4d21-account-create-update-vf2t4"] Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.848412 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.848476 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.848495 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c71918d0-b384-4bf5-b8ee-a338ff72d9e9-operator-scripts\") pod \"heat-db-create-vb7mm\" (UID: \"c71918d0-b384-4bf5-b8ee-a338ff72d9e9\") " pod="openstack/heat-db-create-vb7mm" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.848549 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-config\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.848567 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sfft\" (UniqueName: \"kubernetes.io/projected/c71918d0-b384-4bf5-b8ee-a338ff72d9e9-kube-api-access-4sfft\") pod \"heat-db-create-vb7mm\" (UID: \"c71918d0-b384-4bf5-b8ee-a338ff72d9e9\") " pod="openstack/heat-db-create-vb7mm" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.848598 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.848627 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: 
\"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.848684 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp72b\" (UniqueName: \"kubernetes.io/projected/02fc56a5-86a0-4983-8219-0c0f4f220b7b-kube-api-access-tp72b\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.853861 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.854390 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.854927 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c71918d0-b384-4bf5-b8ee-a338ff72d9e9-operator-scripts\") pod \"heat-db-create-vb7mm\" (UID: \"c71918d0-b384-4bf5-b8ee-a338ff72d9e9\") " pod="openstack/heat-db-create-vb7mm" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.855453 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-config\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.856143 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.906950 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.914717 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp72b\" (UniqueName: \"kubernetes.io/projected/02fc56a5-86a0-4983-8219-0c0f4f220b7b-kube-api-access-tp72b\") pod \"dnsmasq-dns-77585f5f8c-6rwmc\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.935154 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sfft\" (UniqueName: \"kubernetes.io/projected/c71918d0-b384-4bf5-b8ee-a338ff72d9e9-kube-api-access-4sfft\") pod \"heat-db-create-vb7mm\" (UID: \"c71918d0-b384-4bf5-b8ee-a338ff72d9e9\") " pod="openstack/heat-db-create-vb7mm" Jan 22 05:35:36 crc 
kubenswrapper[4814]: I0122 05:35:36.947220 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc"
Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.951543 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbdqz\" (UniqueName: \"kubernetes.io/projected/3631751f-0878-4972-a191-ff026a644832-kube-api-access-sbdqz\") pod \"heat-4d21-account-create-update-vf2t4\" (UID: \"3631751f-0878-4972-a191-ff026a644832\") " pod="openstack/heat-4d21-account-create-update-vf2t4"
Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.951695 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3631751f-0878-4972-a191-ff026a644832-operator-scripts\") pod \"heat-4d21-account-create-update-vf2t4\" (UID: \"3631751f-0878-4972-a191-ff026a644832\") " pod="openstack/heat-4d21-account-create-update-vf2t4"
Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.971155 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-zjkzp"]
Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.972177 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-zjkzp"
Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.975288 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-zjkzp"]
Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.988463 4814 generic.go:334] "Generic (PLEG): container finished" podID="729bdb41-4e49-4df6-a581-87fb0db6f3a0" containerID="8d2af743a7aca9ff498254942665cc91da4e8afba5629eec624cca4256c2d9c2" exitCode=0
Jan 22 05:35:36 crc kubenswrapper[4814]: I0122 05:35:36.990719 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-bl945" event={"ID":"729bdb41-4e49-4df6-a581-87fb0db6f3a0","Type":"ContainerDied","Data":"8d2af743a7aca9ff498254942665cc91da4e8afba5629eec624cca4256c2d9c2"}
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.036591 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-qwtsm"]
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.037674 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-qwtsm"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.056419 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3631751f-0878-4972-a191-ff026a644832-operator-scripts\") pod \"heat-4d21-account-create-update-vf2t4\" (UID: \"3631751f-0878-4972-a191-ff026a644832\") " pod="openstack/heat-4d21-account-create-update-vf2t4"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.056536 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-695rq\" (UniqueName: \"kubernetes.io/projected/4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac-kube-api-access-695rq\") pod \"cinder-db-create-zjkzp\" (UID: \"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac\") " pod="openstack/cinder-db-create-zjkzp"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.056574 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbdqz\" (UniqueName: \"kubernetes.io/projected/3631751f-0878-4972-a191-ff026a644832-kube-api-access-sbdqz\") pod \"heat-4d21-account-create-update-vf2t4\" (UID: \"3631751f-0878-4972-a191-ff026a644832\") " pod="openstack/heat-4d21-account-create-update-vf2t4"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.056600 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac-operator-scripts\") pod \"cinder-db-create-zjkzp\" (UID: \"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac\") " pod="openstack/cinder-db-create-zjkzp"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.057739 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3631751f-0878-4972-a191-ff026a644832-operator-scripts\") pod \"heat-4d21-account-create-update-vf2t4\" (UID: \"3631751f-0878-4972-a191-ff026a644832\") " pod="openstack/heat-4d21-account-create-update-vf2t4"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.067304 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-qwtsm"]
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.093215 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbdqz\" (UniqueName: \"kubernetes.io/projected/3631751f-0878-4972-a191-ff026a644832-kube-api-access-sbdqz\") pod \"heat-4d21-account-create-update-vf2t4\" (UID: \"3631751f-0878-4972-a191-ff026a644832\") " pod="openstack/heat-4d21-account-create-update-vf2t4"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.129965 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-4d21-account-create-update-vf2t4"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.135740 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-81fe-account-create-update-pqrz5"]
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.138960 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-81fe-account-create-update-pqrz5"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.140918 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.157463 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9dc758e-58fd-4232-adf3-f9c9de238a9f-operator-scripts\") pod \"barbican-db-create-qwtsm\" (UID: \"b9dc758e-58fd-4232-adf3-f9c9de238a9f\") " pod="openstack/barbican-db-create-qwtsm"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.157599 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-695rq\" (UniqueName: \"kubernetes.io/projected/4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac-kube-api-access-695rq\") pod \"cinder-db-create-zjkzp\" (UID: \"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac\") " pod="openstack/cinder-db-create-zjkzp"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.157659 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24sxk\" (UniqueName: \"kubernetes.io/projected/b9dc758e-58fd-4232-adf3-f9c9de238a9f-kube-api-access-24sxk\") pod \"barbican-db-create-qwtsm\" (UID: \"b9dc758e-58fd-4232-adf3-f9c9de238a9f\") " pod="openstack/barbican-db-create-qwtsm"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.157700 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac-operator-scripts\") pod \"cinder-db-create-zjkzp\" (UID: \"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac\") " pod="openstack/cinder-db-create-zjkzp"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.158307 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac-operator-scripts\") pod \"cinder-db-create-zjkzp\" (UID: \"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac\") " pod="openstack/cinder-db-create-zjkzp"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.170678 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-vb7mm"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.178842 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-81fe-account-create-update-pqrz5"]
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.241245 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-1385-account-create-update-nqh2m"]
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.242567 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-1385-account-create-update-nqh2m"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.257686 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.258556 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0cb80edb-5104-4f39-b8a5-2c285bdc1ff1-operator-scripts\") pod \"barbican-81fe-account-create-update-pqrz5\" (UID: \"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1\") " pod="openstack/barbican-81fe-account-create-update-pqrz5"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.258613 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24sxk\" (UniqueName: \"kubernetes.io/projected/b9dc758e-58fd-4232-adf3-f9c9de238a9f-kube-api-access-24sxk\") pod \"barbican-db-create-qwtsm\" (UID: \"b9dc758e-58fd-4232-adf3-f9c9de238a9f\") " pod="openstack/barbican-db-create-qwtsm"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.258663 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqvrw\" (UniqueName: \"kubernetes.io/projected/0cb80edb-5104-4f39-b8a5-2c285bdc1ff1-kube-api-access-dqvrw\") pod \"barbican-81fe-account-create-update-pqrz5\" (UID: \"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1\") " pod="openstack/barbican-81fe-account-create-update-pqrz5"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.258694 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9dc758e-58fd-4232-adf3-f9c9de238a9f-operator-scripts\") pod \"barbican-db-create-qwtsm\" (UID: \"b9dc758e-58fd-4232-adf3-f9c9de238a9f\") " pod="openstack/barbican-db-create-qwtsm"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.259525 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9dc758e-58fd-4232-adf3-f9c9de238a9f-operator-scripts\") pod \"barbican-db-create-qwtsm\" (UID: \"b9dc758e-58fd-4232-adf3-f9c9de238a9f\") " pod="openstack/barbican-db-create-qwtsm"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.275583 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-695rq\" (UniqueName: \"kubernetes.io/projected/4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac-kube-api-access-695rq\") pod \"cinder-db-create-zjkzp\" (UID: \"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac\") " pod="openstack/cinder-db-create-zjkzp"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.298542 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-4lt4r"]
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.299589 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-4lt4r"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.308009 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.308298 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8jbhz"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.308531 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.308742 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.309074 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24sxk\" (UniqueName: \"kubernetes.io/projected/b9dc758e-58fd-4232-adf3-f9c9de238a9f-kube-api-access-24sxk\") pod \"barbican-db-create-qwtsm\" (UID: \"b9dc758e-58fd-4232-adf3-f9c9de238a9f\") " pod="openstack/barbican-db-create-qwtsm"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.327816 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-1385-account-create-update-nqh2m"]
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.357770 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-4lt4r"]
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.361020 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0cb80edb-5104-4f39-b8a5-2c285bdc1ff1-operator-scripts\") pod \"barbican-81fe-account-create-update-pqrz5\" (UID: \"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1\") " pod="openstack/barbican-81fe-account-create-update-pqrz5"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.361066 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcjbl\" (UniqueName: \"kubernetes.io/projected/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-kube-api-access-dcjbl\") pod \"keystone-db-sync-4lt4r\" (UID: \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\") " pod="openstack/keystone-db-sync-4lt4r"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.361086 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8f36d3f-f478-4067-b71b-c799da7e07d9-operator-scripts\") pod \"cinder-1385-account-create-update-nqh2m\" (UID: \"a8f36d3f-f478-4067-b71b-c799da7e07d9\") " pod="openstack/cinder-1385-account-create-update-nqh2m"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.361138 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqvrw\" (UniqueName: \"kubernetes.io/projected/0cb80edb-5104-4f39-b8a5-2c285bdc1ff1-kube-api-access-dqvrw\") pod \"barbican-81fe-account-create-update-pqrz5\" (UID: \"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1\") " pod="openstack/barbican-81fe-account-create-update-pqrz5"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.361161 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c84xv\" (UniqueName: \"kubernetes.io/projected/a8f36d3f-f478-4067-b71b-c799da7e07d9-kube-api-access-c84xv\") pod \"cinder-1385-account-create-update-nqh2m\" (UID: \"a8f36d3f-f478-4067-b71b-c799da7e07d9\") " pod="openstack/cinder-1385-account-create-update-nqh2m"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.361202 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-combined-ca-bundle\") pod \"keystone-db-sync-4lt4r\" (UID: \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\") " pod="openstack/keystone-db-sync-4lt4r"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.361226 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-config-data\") pod \"keystone-db-sync-4lt4r\" (UID: \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\") " pod="openstack/keystone-db-sync-4lt4r"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.361881 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0cb80edb-5104-4f39-b8a5-2c285bdc1ff1-operator-scripts\") pod \"barbican-81fe-account-create-update-pqrz5\" (UID: \"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1\") " pod="openstack/barbican-81fe-account-create-update-pqrz5"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.380893 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-zjkzp"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.388928 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-qwtsm"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.424995 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqvrw\" (UniqueName: \"kubernetes.io/projected/0cb80edb-5104-4f39-b8a5-2c285bdc1ff1-kube-api-access-dqvrw\") pod \"barbican-81fe-account-create-update-pqrz5\" (UID: \"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1\") " pod="openstack/barbican-81fe-account-create-update-pqrz5"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.429684 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-snjhg"]
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.430716 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-snjhg"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.457760 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-9bcb-account-create-update-vvs4l"]
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.461357 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-9bcb-account-create-update-vvs4l"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.463013 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcjbl\" (UniqueName: \"kubernetes.io/projected/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-kube-api-access-dcjbl\") pod \"keystone-db-sync-4lt4r\" (UID: \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\") " pod="openstack/keystone-db-sync-4lt4r"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.463129 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8f36d3f-f478-4067-b71b-c799da7e07d9-operator-scripts\") pod \"cinder-1385-account-create-update-nqh2m\" (UID: \"a8f36d3f-f478-4067-b71b-c799da7e07d9\") " pod="openstack/cinder-1385-account-create-update-nqh2m"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.463248 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c84xv\" (UniqueName: \"kubernetes.io/projected/a8f36d3f-f478-4067-b71b-c799da7e07d9-kube-api-access-c84xv\") pod \"cinder-1385-account-create-update-nqh2m\" (UID: \"a8f36d3f-f478-4067-b71b-c799da7e07d9\") " pod="openstack/cinder-1385-account-create-update-nqh2m"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.463365 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-combined-ca-bundle\") pod \"keystone-db-sync-4lt4r\" (UID: \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\") " pod="openstack/keystone-db-sync-4lt4r"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.463443 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-config-data\") pod \"keystone-db-sync-4lt4r\" (UID: \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\") " pod="openstack/keystone-db-sync-4lt4r"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.465153 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8f36d3f-f478-4067-b71b-c799da7e07d9-operator-scripts\") pod \"cinder-1385-account-create-update-nqh2m\" (UID: \"a8f36d3f-f478-4067-b71b-c799da7e07d9\") " pod="openstack/cinder-1385-account-create-update-nqh2m"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.466620 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.479720 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-config-data\") pod \"keystone-db-sync-4lt4r\" (UID: \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\") " pod="openstack/keystone-db-sync-4lt4r"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.482488 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c84xv\" (UniqueName: \"kubernetes.io/projected/a8f36d3f-f478-4067-b71b-c799da7e07d9-kube-api-access-c84xv\") pod \"cinder-1385-account-create-update-nqh2m\" (UID: \"a8f36d3f-f478-4067-b71b-c799da7e07d9\") " pod="openstack/cinder-1385-account-create-update-nqh2m"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.488158 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-combined-ca-bundle\") pod \"keystone-db-sync-4lt4r\" (UID: \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\") " pod="openstack/keystone-db-sync-4lt4r"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.489309 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcjbl\" (UniqueName: \"kubernetes.io/projected/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-kube-api-access-dcjbl\") pod \"keystone-db-sync-4lt4r\" (UID: \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\") " pod="openstack/keystone-db-sync-4lt4r"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.500417 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-snjhg"]
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.518345 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-9bcb-account-create-update-vvs4l"]
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.566493 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fpwq\" (UniqueName: \"kubernetes.io/projected/a9340e06-3a50-4f01-9314-44e5786484e1-kube-api-access-9fpwq\") pod \"neutron-9bcb-account-create-update-vvs4l\" (UID: \"a9340e06-3a50-4f01-9314-44e5786484e1\") " pod="openstack/neutron-9bcb-account-create-update-vvs4l"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.566656 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9340e06-3a50-4f01-9314-44e5786484e1-operator-scripts\") pod \"neutron-9bcb-account-create-update-vvs4l\" (UID: \"a9340e06-3a50-4f01-9314-44e5786484e1\") " pod="openstack/neutron-9bcb-account-create-update-vvs4l"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.566683 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc033b9f-32ee-44ed-85b1-4655c687ffe9-operator-scripts\") pod \"neutron-db-create-snjhg\" (UID: \"bc033b9f-32ee-44ed-85b1-4655c687ffe9\") " pod="openstack/neutron-db-create-snjhg"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.566707 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4jvl\" (UniqueName: \"kubernetes.io/projected/bc033b9f-32ee-44ed-85b1-4655c687ffe9-kube-api-access-q4jvl\") pod \"neutron-db-create-snjhg\" (UID: \"bc033b9f-32ee-44ed-85b1-4655c687ffe9\") " pod="openstack/neutron-db-create-snjhg"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.632189 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-81fe-account-create-update-pqrz5"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.654350 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-1385-account-create-update-nqh2m"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.667950 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc033b9f-32ee-44ed-85b1-4655c687ffe9-operator-scripts\") pod \"neutron-db-create-snjhg\" (UID: \"bc033b9f-32ee-44ed-85b1-4655c687ffe9\") " pod="openstack/neutron-db-create-snjhg"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.668000 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4jvl\" (UniqueName: \"kubernetes.io/projected/bc033b9f-32ee-44ed-85b1-4655c687ffe9-kube-api-access-q4jvl\") pod \"neutron-db-create-snjhg\" (UID: \"bc033b9f-32ee-44ed-85b1-4655c687ffe9\") " pod="openstack/neutron-db-create-snjhg"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.668044 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fpwq\" (UniqueName: \"kubernetes.io/projected/a9340e06-3a50-4f01-9314-44e5786484e1-kube-api-access-9fpwq\") pod \"neutron-9bcb-account-create-update-vvs4l\" (UID: \"a9340e06-3a50-4f01-9314-44e5786484e1\") " pod="openstack/neutron-9bcb-account-create-update-vvs4l"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.668141 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9340e06-3a50-4f01-9314-44e5786484e1-operator-scripts\") pod \"neutron-9bcb-account-create-update-vvs4l\" (UID: \"a9340e06-3a50-4f01-9314-44e5786484e1\") " pod="openstack/neutron-9bcb-account-create-update-vvs4l"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.668780 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9340e06-3a50-4f01-9314-44e5786484e1-operator-scripts\") pod \"neutron-9bcb-account-create-update-vvs4l\" (UID: \"a9340e06-3a50-4f01-9314-44e5786484e1\") " pod="openstack/neutron-9bcb-account-create-update-vvs4l"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.669211 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc033b9f-32ee-44ed-85b1-4655c687ffe9-operator-scripts\") pod \"neutron-db-create-snjhg\" (UID: \"bc033b9f-32ee-44ed-85b1-4655c687ffe9\") " pod="openstack/neutron-db-create-snjhg"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.693564 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fpwq\" (UniqueName: \"kubernetes.io/projected/a9340e06-3a50-4f01-9314-44e5786484e1-kube-api-access-9fpwq\") pod \"neutron-9bcb-account-create-update-vvs4l\" (UID: \"a9340e06-3a50-4f01-9314-44e5786484e1\") " pod="openstack/neutron-9bcb-account-create-update-vvs4l"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.697045 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4jvl\" (UniqueName: \"kubernetes.io/projected/bc033b9f-32ee-44ed-85b1-4655c687ffe9-kube-api-access-q4jvl\") pod \"neutron-db-create-snjhg\" (UID: \"bc033b9f-32ee-44ed-85b1-4655c687ffe9\") " pod="openstack/neutron-db-create-snjhg"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.717380 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-4lt4r"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.789202 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-snjhg"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.829897 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-9bcb-account-create-update-vvs4l"
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.852307 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-4d21-account-create-update-vf2t4"]
Jan 22 05:35:37 crc kubenswrapper[4814]: I0122 05:35:37.903128 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-6rwmc"]
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.023740 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" event={"ID":"02fc56a5-86a0-4983-8219-0c0f4f220b7b","Type":"ContainerStarted","Data":"258eeaeb2d8f75334af3fbb29034005202978ef07944789823b7f9c3913521e8"}
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.029924 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-4d21-account-create-update-vf2t4" event={"ID":"3631751f-0878-4972-a191-ff026a644832","Type":"ContainerStarted","Data":"2a93d6fbedf062f4edaebc702931fb07ca27659a67ee6d4252ee42d9fffd416a"}
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.145691 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-vb7mm"]
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.161252 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-zjkzp"]
Jan 22 05:35:38 crc kubenswrapper[4814]: W0122 05:35:38.189255 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b16ab55_25e3_4a6e_9ffd_f286d9f2dfac.slice/crio-9c18e97e04ef8f5aa3db020327e4ebe3aa45bab58e79054f0a56f4c977bc5dc9 WatchSource:0}: Error finding container 9c18e97e04ef8f5aa3db020327e4ebe3aa45bab58e79054f0a56f4c977bc5dc9: Status 404 returned error can't find the container with id 9c18e97e04ef8f5aa3db020327e4ebe3aa45bab58e79054f0a56f4c977bc5dc9
Jan 22 05:35:38 crc kubenswrapper[4814]: W0122 05:35:38.207092 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc71918d0_b384_4bf5_b8ee_a338ff72d9e9.slice/crio-d54f8cd2a5a69d4fbceb5b435390a22c781c5407b70641c6eb4b643c812f7d93 WatchSource:0}: Error finding container d54f8cd2a5a69d4fbceb5b435390a22c781c5407b70641c6eb4b643c812f7d93: Status 404 returned error can't find the container with id d54f8cd2a5a69d4fbceb5b435390a22c781c5407b70641c6eb4b643c812f7d93
Jan 22 05:35:38 crc kubenswrapper[4814]: W0122 05:35:38.213355 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9dc758e_58fd_4232_adf3_f9c9de238a9f.slice/crio-17c4c3601b8bef1e1a199c28a5135636999aa1e4b5e8edb151c3cd0c074c4d71 WatchSource:0}: Error finding container 17c4c3601b8bef1e1a199c28a5135636999aa1e4b5e8edb151c3cd0c074c4d71: Status 404 returned error can't find the container with id 17c4c3601b8bef1e1a199c28a5135636999aa1e4b5e8edb151c3cd0c074c4d71
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.225117 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-qwtsm"]
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.530514 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-1385-account-create-update-nqh2m"]
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.530547 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-snjhg"]
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.547328 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-81fe-account-create-update-pqrz5"]
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.563760 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-4lt4r"]
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.573188 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-bl945"
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.623258 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/729bdb41-4e49-4df6-a581-87fb0db6f3a0-operator-scripts\") pod \"729bdb41-4e49-4df6-a581-87fb0db6f3a0\" (UID: \"729bdb41-4e49-4df6-a581-87fb0db6f3a0\") "
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.623409 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vc4gx\" (UniqueName: \"kubernetes.io/projected/729bdb41-4e49-4df6-a581-87fb0db6f3a0-kube-api-access-vc4gx\") pod \"729bdb41-4e49-4df6-a581-87fb0db6f3a0\" (UID: \"729bdb41-4e49-4df6-a581-87fb0db6f3a0\") "
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.624141 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/729bdb41-4e49-4df6-a581-87fb0db6f3a0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "729bdb41-4e49-4df6-a581-87fb0db6f3a0" (UID: "729bdb41-4e49-4df6-a581-87fb0db6f3a0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.680737 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/729bdb41-4e49-4df6-a581-87fb0db6f3a0-kube-api-access-vc4gx" (OuterVolumeSpecName: "kube-api-access-vc4gx") pod "729bdb41-4e49-4df6-a581-87fb0db6f3a0" (UID: "729bdb41-4e49-4df6-a581-87fb0db6f3a0"). InnerVolumeSpecName "kube-api-access-vc4gx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.724538 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-9bcb-account-create-update-vvs4l"]
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.724607 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vc4gx\" (UniqueName: \"kubernetes.io/projected/729bdb41-4e49-4df6-a581-87fb0db6f3a0-kube-api-access-vc4gx\") on node \"crc\" DevicePath \"\""
Jan 22 05:35:38 crc kubenswrapper[4814]: I0122 05:35:38.724642 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/729bdb41-4e49-4df6-a581-87fb0db6f3a0-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.044218 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-4d21-account-create-update-vf2t4" event={"ID":"3631751f-0878-4972-a191-ff026a644832","Type":"ContainerStarted","Data":"7d1ab6d13de22aad2109f1a0b0ec69caa673eeee3dbdfa188178d9f5b6ac8265"}
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.049961 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-81fe-account-create-update-pqrz5" event={"ID":"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1","Type":"ContainerStarted","Data":"2abd6dffe7f9335bc7756615fd3f3f6d8e83dc87d8c33db3c730545376aef1e7"}
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.058995 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-zjkzp" event={"ID":"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac","Type":"ContainerStarted","Data":"5cfd5a5c716f1bcc395c2f6b4d84a5b72a1218ad1ea5d63a591a9f5700c3f854"}
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.059035 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-zjkzp" event={"ID":"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac","Type":"ContainerStarted","Data":"9c18e97e04ef8f5aa3db020327e4ebe3aa45bab58e79054f0a56f4c977bc5dc9"}
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.082941 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-vb7mm" event={"ID":"c71918d0-b384-4bf5-b8ee-a338ff72d9e9","Type":"ContainerStarted","Data":"1e0c50551c24b420ee7d50dc7ac465d53663130cf8564ed6c0dec8119b48025a"}
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.082979 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-vb7mm" event={"ID":"c71918d0-b384-4bf5-b8ee-a338ff72d9e9","Type":"ContainerStarted","Data":"d54f8cd2a5a69d4fbceb5b435390a22c781c5407b70641c6eb4b643c812f7d93"}
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.084715 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-4d21-account-create-update-vf2t4" podStartSLOduration=3.084699568 podStartE2EDuration="3.084699568s" podCreationTimestamp="2026-01-22 05:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:35:39.076959339 +0000 UTC m=+1025.160447554" watchObservedRunningTime="2026-01-22 05:35:39.084699568 +0000 UTC m=+1025.168187783"
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.092793 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1385-account-create-update-nqh2m" event={"ID":"a8f36d3f-f478-4067-b71b-c799da7e07d9","Type":"ContainerStarted","Data":"7f9df17b5ced0194c32396076d5b5f27673e218d88c491b3fe5ee5393315eef9"}
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.097667 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-bl945" event={"ID":"729bdb41-4e49-4df6-a581-87fb0db6f3a0","Type":"ContainerDied","Data":"eefd9a3111173efb54c78b79c94065f8f3d0fcae9fc0bfc59869840fa82d451a"}
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.097698 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eefd9a3111173efb54c78b79c94065f8f3d0fcae9fc0bfc59869840fa82d451a"
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.097763 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-bl945"
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.101114 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-zjkzp" podStartSLOduration=3.101100006 podStartE2EDuration="3.101100006s" podCreationTimestamp="2026-01-22 05:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:35:39.09894476 +0000 UTC m=+1025.182432975" watchObservedRunningTime="2026-01-22 05:35:39.101100006 +0000 UTC m=+1025.184588221"
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.117183 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-9bcb-account-create-update-vvs4l" event={"ID":"a9340e06-3a50-4f01-9314-44e5786484e1","Type":"ContainerStarted","Data":"f1f421b758b99fa59bd946acdb8384c237d364e3acdb05194f633d21401ee768"}
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.169371 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-qwtsm" event={"ID":"b9dc758e-58fd-4232-adf3-f9c9de238a9f","Type":"ContainerStarted","Data":"5fd8aef9f5b5e015e12b747ea333dbba33a7e80c266fde34e7e858499a61ea88"}
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.169612 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-qwtsm" event={"ID":"b9dc758e-58fd-4232-adf3-f9c9de238a9f","Type":"ContainerStarted","Data":"17c4c3601b8bef1e1a199c28a5135636999aa1e4b5e8edb151c3cd0c074c4d71"}
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.185995 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-create-vb7mm" podStartSLOduration=3.185980267 podStartE2EDuration="3.185980267s" podCreationTimestamp="2026-01-22 05:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:35:39.149484536 +0000 UTC m=+1025.232972751" watchObservedRunningTime="2026-01-22 05:35:39.185980267 +0000 UTC m=+1025.269468482"
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.188284 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-snjhg" event={"ID":"bc033b9f-32ee-44ed-85b1-4655c687ffe9","Type":"ContainerStarted","Data":"da40020aca6941797ad6368460cbff2ab9b88f04583de22e8c4e109964a0d68d"}
Jan 22 05:35:39 crc kubenswrapper[4814]: I0122 05:35:39.189839 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-4lt4r" event={"ID":"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979","Type":"ContainerStarted","Data":"eb38b678a909fd884d4297ac53906a5701383c8ba9666bb968cbaa2a56fd58f9"}
Jan 22 05:35:39 crc kubenswrapper[4814]: E0122 05:35:39.826197 4814 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod02fc56a5_86a0_4983_8219_0c0f4f220b7b.slice/crio-conmon-62d72c115f899c875878b510db9102c030d9927de129565d7be5798b3d9f2d6e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3631751f_0878_4972_a191_ff026a644832.slice/crio-conmon-7d1ab6d13de22aad2109f1a0b0ec69caa673eeee3dbdfa188178d9f5b6ac8265.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3631751f_0878_4972_a191_ff026a644832.slice/crio-7d1ab6d13de22aad2109f1a0b0ec69caa673eeee3dbdfa188178d9f5b6ac8265.scope\": RecentStats: unable to find data in memory cache]"
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.199518 4814 generic.go:334] "Generic (PLEG): container finished" podID="4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac" containerID="5cfd5a5c716f1bcc395c2f6b4d84a5b72a1218ad1ea5d63a591a9f5700c3f854" exitCode=0
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.199901 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-zjkzp" event={"ID":"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac","Type":"ContainerDied","Data":"5cfd5a5c716f1bcc395c2f6b4d84a5b72a1218ad1ea5d63a591a9f5700c3f854"}
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.204357 4814 generic.go:334] "Generic (PLEG): container finished" podID="c71918d0-b384-4bf5-b8ee-a338ff72d9e9" containerID="1e0c50551c24b420ee7d50dc7ac465d53663130cf8564ed6c0dec8119b48025a" exitCode=0
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.204445 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-vb7mm" event={"ID":"c71918d0-b384-4bf5-b8ee-a338ff72d9e9","Type":"ContainerDied","Data":"1e0c50551c24b420ee7d50dc7ac465d53663130cf8564ed6c0dec8119b48025a"}
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.205950 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1385-account-create-update-nqh2m" event={"ID":"a8f36d3f-f478-4067-b71b-c799da7e07d9","Type":"ContainerStarted","Data":"af71b291dc88af0eb592a02b111c9c780b02a7bdaced65b2a255365011bc3217"}
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.207801 4814 generic.go:334] "Generic (PLEG): container finished" podID="a9340e06-3a50-4f01-9314-44e5786484e1" containerID="9ae96210948d6f5cdf7bfe3e94d50f77e834c4210ec97455b778d8b80c643b83" exitCode=0
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.207837 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-9bcb-account-create-update-vvs4l" event={"ID":"a9340e06-3a50-4f01-9314-44e5786484e1","Type":"ContainerDied","Data":"9ae96210948d6f5cdf7bfe3e94d50f77e834c4210ec97455b778d8b80c643b83"}
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.208848 4814 generic.go:334] "Generic (PLEG): container finished" podID="b9dc758e-58fd-4232-adf3-f9c9de238a9f" containerID="5fd8aef9f5b5e015e12b747ea333dbba33a7e80c266fde34e7e858499a61ea88" exitCode=0
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.208884 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-qwtsm" event={"ID":"b9dc758e-58fd-4232-adf3-f9c9de238a9f","Type":"ContainerDied","Data":"5fd8aef9f5b5e015e12b747ea333dbba33a7e80c266fde34e7e858499a61ea88"}
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.210569 4814 generic.go:334] "Generic (PLEG): container finished" podID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerID="62d72c115f899c875878b510db9102c030d9927de129565d7be5798b3d9f2d6e" exitCode=0
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.210610 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" event={"ID":"02fc56a5-86a0-4983-8219-0c0f4f220b7b","Type":"ContainerDied","Data":"62d72c115f899c875878b510db9102c030d9927de129565d7be5798b3d9f2d6e"}
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.227225 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-81fe-account-create-update-pqrz5" event={"ID":"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1","Type":"ContainerStarted","Data":"79e7df3871c956730177ee4046e1b78ca68df163829e50fa865215eb65440b0c"}
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.230348 4814 generic.go:334] "Generic (PLEG): container finished" podID="3631751f-0878-4972-a191-ff026a644832" containerID="7d1ab6d13de22aad2109f1a0b0ec69caa673eeee3dbdfa188178d9f5b6ac8265" exitCode=0
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.230403 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-4d21-account-create-update-vf2t4" event={"ID":"3631751f-0878-4972-a191-ff026a644832","Type":"ContainerDied","Data":"7d1ab6d13de22aad2109f1a0b0ec69caa673eeee3dbdfa188178d9f5b6ac8265"}
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.239040 4814 generic.go:334] "Generic (PLEG): container finished" podID="bc033b9f-32ee-44ed-85b1-4655c687ffe9" containerID="5418634d65afda0304ddb4ca14c9a6ebce7c8c59a2dfb6c35929c9506170b69a" exitCode=0
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.239117 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-snjhg" event={"ID":"bc033b9f-32ee-44ed-85b1-4655c687ffe9","Type":"ContainerDied","Data":"5418634d65afda0304ddb4ca14c9a6ebce7c8c59a2dfb6c35929c9506170b69a"}
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.247444 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-qwtsm" podStartSLOduration=4.247429962 podStartE2EDuration="4.247429962s" podCreationTimestamp="2026-01-22 05:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:35:39.200024192 +0000 UTC m=+1025.283512407" watchObservedRunningTime="2026-01-22 05:35:40.247429962 +0000 UTC m=+1026.330918177"
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.446033 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-1385-account-create-update-nqh2m" podStartSLOduration=3.446017367 podStartE2EDuration="3.446017367s" podCreationTimestamp="2026-01-22 05:35:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:35:40.441171366 +0000 UTC m=+1026.524659581" watchObservedRunningTime="2026-01-22 05:35:40.446017367 +0000 UTC m=+1026.529505572"
Jan 22 05:35:40 crc kubenswrapper[4814]: I0122 05:35:40.486948 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-81fe-account-create-update-pqrz5" podStartSLOduration=3.486917224 podStartE2EDuration="3.486917224s" podCreationTimestamp="2026-01-22 05:35:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:35:40.485064037 +0000 UTC m=+1026.568552252" watchObservedRunningTime="2026-01-22 05:35:40.486917224 +0000 UTC m=+1026.570405439"
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.247891 4814 generic.go:334] "Generic (PLEG): container finished" podID="a8f36d3f-f478-4067-b71b-c799da7e07d9" containerID="af71b291dc88af0eb592a02b111c9c780b02a7bdaced65b2a255365011bc3217" exitCode=0
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.248327 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1385-account-create-update-nqh2m" event={"ID":"a8f36d3f-f478-4067-b71b-c799da7e07d9","Type":"ContainerDied","Data":"af71b291dc88af0eb592a02b111c9c780b02a7bdaced65b2a255365011bc3217"}
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.251062 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" event={"ID":"02fc56a5-86a0-4983-8219-0c0f4f220b7b","Type":"ContainerStarted","Data":"888e29db900467c06d89a2ca16d7477bfcd2f0e54110ca90eae2b05a4f535deb"}
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.252129 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc"
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.254776 4814 generic.go:334] "Generic (PLEG): container finished" podID="0cb80edb-5104-4f39-b8a5-2c285bdc1ff1" containerID="79e7df3871c956730177ee4046e1b78ca68df163829e50fa865215eb65440b0c" exitCode=0
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.255074 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-81fe-account-create-update-pqrz5" event={"ID":"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1","Type":"ContainerDied","Data":"79e7df3871c956730177ee4046e1b78ca68df163829e50fa865215eb65440b0c"}
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.312272 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" podStartSLOduration=5.312254302 podStartE2EDuration="5.312254302s" podCreationTimestamp="2026-01-22 05:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:35:41.296278977 +0000 UTC m=+1027.379767192" watchObservedRunningTime="2026-01-22 05:35:41.312254302 +0000 UTC m=+1027.395742517"
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.642770 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-qwtsm"
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.735138 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9dc758e-58fd-4232-adf3-f9c9de238a9f-operator-scripts\") pod \"b9dc758e-58fd-4232-adf3-f9c9de238a9f\" (UID: \"b9dc758e-58fd-4232-adf3-f9c9de238a9f\") "
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.735454 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-24sxk\" (UniqueName: \"kubernetes.io/projected/b9dc758e-58fd-4232-adf3-f9c9de238a9f-kube-api-access-24sxk\") pod \"b9dc758e-58fd-4232-adf3-f9c9de238a9f\" (UID: \"b9dc758e-58fd-4232-adf3-f9c9de238a9f\") "
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.735823 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9dc758e-58fd-4232-adf3-f9c9de238a9f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b9dc758e-58fd-4232-adf3-f9c9de238a9f" (UID: "b9dc758e-58fd-4232-adf3-f9c9de238a9f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.736602 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9dc758e-58fd-4232-adf3-f9c9de238a9f-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.751181 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9dc758e-58fd-4232-adf3-f9c9de238a9f-kube-api-access-24sxk" (OuterVolumeSpecName: "kube-api-access-24sxk") pod "b9dc758e-58fd-4232-adf3-f9c9de238a9f" (UID: "b9dc758e-58fd-4232-adf3-f9c9de238a9f"). InnerVolumeSpecName "kube-api-access-24sxk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:35:41 crc kubenswrapper[4814]: I0122 05:35:41.838122 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-24sxk\" (UniqueName: \"kubernetes.io/projected/b9dc758e-58fd-4232-adf3-f9c9de238a9f-kube-api-access-24sxk\") on node \"crc\" DevicePath \"\""
Jan 22 05:35:42 crc kubenswrapper[4814]: I0122 05:35:42.274059 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-qwtsm"
Jan 22 05:35:42 crc kubenswrapper[4814]: I0122 05:35:42.278726 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-qwtsm" event={"ID":"b9dc758e-58fd-4232-adf3-f9c9de238a9f","Type":"ContainerDied","Data":"17c4c3601b8bef1e1a199c28a5135636999aa1e4b5e8edb151c3cd0c074c4d71"}
Jan 22 05:35:42 crc kubenswrapper[4814]: I0122 05:35:42.278803 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="17c4c3601b8bef1e1a199c28a5135636999aa1e4b5e8edb151c3cd0c074c4d71"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.352067 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-9bcb-account-create-update-vvs4l" event={"ID":"a9340e06-3a50-4f01-9314-44e5786484e1","Type":"ContainerDied","Data":"f1f421b758b99fa59bd946acdb8384c237d364e3acdb05194f633d21401ee768"}
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.352109 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1f421b758b99fa59bd946acdb8384c237d364e3acdb05194f633d21401ee768"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.354889 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-4d21-account-create-update-vf2t4" event={"ID":"3631751f-0878-4972-a191-ff026a644832","Type":"ContainerDied","Data":"2a93d6fbedf062f4edaebc702931fb07ca27659a67ee6d4252ee42d9fffd416a"}
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.354927 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a93d6fbedf062f4edaebc702931fb07ca27659a67ee6d4252ee42d9fffd416a"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.356456 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-81fe-account-create-update-pqrz5" event={"ID":"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1","Type":"ContainerDied","Data":"2abd6dffe7f9335bc7756615fd3f3f6d8e83dc87d8c33db3c730545376aef1e7"}
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.356479 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2abd6dffe7f9335bc7756615fd3f3f6d8e83dc87d8c33db3c730545376aef1e7"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.358205 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-snjhg" event={"ID":"bc033b9f-32ee-44ed-85b1-4655c687ffe9","Type":"ContainerDied","Data":"da40020aca6941797ad6368460cbff2ab9b88f04583de22e8c4e109964a0d68d"}
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.358226 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da40020aca6941797ad6368460cbff2ab9b88f04583de22e8c4e109964a0d68d"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.362695 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-zjkzp" event={"ID":"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac","Type":"ContainerDied","Data":"9c18e97e04ef8f5aa3db020327e4ebe3aa45bab58e79054f0a56f4c977bc5dc9"}
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.362715 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c18e97e04ef8f5aa3db020327e4ebe3aa45bab58e79054f0a56f4c977bc5dc9"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.366336 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-1385-account-create-update-nqh2m" event={"ID":"a8f36d3f-f478-4067-b71b-c799da7e07d9","Type":"ContainerDied","Data":"7f9df17b5ced0194c32396076d5b5f27673e218d88c491b3fe5ee5393315eef9"}
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.366370 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f9df17b5ced0194c32396076d5b5f27673e218d88c491b3fe5ee5393315eef9"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.367066 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-4d21-account-create-update-vf2t4"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.372060 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-vb7mm" event={"ID":"c71918d0-b384-4bf5-b8ee-a338ff72d9e9","Type":"ContainerDied","Data":"d54f8cd2a5a69d4fbceb5b435390a22c781c5407b70641c6eb4b643c812f7d93"}
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.372083 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d54f8cd2a5a69d4fbceb5b435390a22c781c5407b70641c6eb4b643c812f7d93"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.469764 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-snjhg"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.490940 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-9bcb-account-create-update-vvs4l"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.495441 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-vb7mm"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.500516 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-zjkzp"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.508077 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-81fe-account-create-update-pqrz5"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.518855 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3631751f-0878-4972-a191-ff026a644832-operator-scripts\") pod \"3631751f-0878-4972-a191-ff026a644832\" (UID: \"3631751f-0878-4972-a191-ff026a644832\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.519038 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbdqz\" (UniqueName: \"kubernetes.io/projected/3631751f-0878-4972-a191-ff026a644832-kube-api-access-sbdqz\") pod \"3631751f-0878-4972-a191-ff026a644832\" (UID: \"3631751f-0878-4972-a191-ff026a644832\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.522193 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3631751f-0878-4972-a191-ff026a644832-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3631751f-0878-4972-a191-ff026a644832" (UID: "3631751f-0878-4972-a191-ff026a644832"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.527562 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3631751f-0878-4972-a191-ff026a644832-kube-api-access-sbdqz" (OuterVolumeSpecName: "kube-api-access-sbdqz") pod "3631751f-0878-4972-a191-ff026a644832" (UID: "3631751f-0878-4972-a191-ff026a644832"). InnerVolumeSpecName "kube-api-access-sbdqz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.537752 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-1385-account-create-update-nqh2m"
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620038 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8f36d3f-f478-4067-b71b-c799da7e07d9-operator-scripts\") pod \"a8f36d3f-f478-4067-b71b-c799da7e07d9\" (UID: \"a8f36d3f-f478-4067-b71b-c799da7e07d9\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620082 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4jvl\" (UniqueName: \"kubernetes.io/projected/bc033b9f-32ee-44ed-85b1-4655c687ffe9-kube-api-access-q4jvl\") pod \"bc033b9f-32ee-44ed-85b1-4655c687ffe9\" (UID: \"bc033b9f-32ee-44ed-85b1-4655c687ffe9\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620119 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4sfft\" (UniqueName: \"kubernetes.io/projected/c71918d0-b384-4bf5-b8ee-a338ff72d9e9-kube-api-access-4sfft\") pod \"c71918d0-b384-4bf5-b8ee-a338ff72d9e9\" (UID: \"c71918d0-b384-4bf5-b8ee-a338ff72d9e9\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620146 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dqvrw\" (UniqueName: \"kubernetes.io/projected/0cb80edb-5104-4f39-b8a5-2c285bdc1ff1-kube-api-access-dqvrw\") pod \"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1\" (UID: \"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620201 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-695rq\" (UniqueName: \"kubernetes.io/projected/4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac-kube-api-access-695rq\") pod \"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac\" (UID: \"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620246 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9340e06-3a50-4f01-9314-44e5786484e1-operator-scripts\") pod \"a9340e06-3a50-4f01-9314-44e5786484e1\" (UID: \"a9340e06-3a50-4f01-9314-44e5786484e1\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620283 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac-operator-scripts\") pod \"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac\" (UID: \"4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620323 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc033b9f-32ee-44ed-85b1-4655c687ffe9-operator-scripts\") pod \"bc033b9f-32ee-44ed-85b1-4655c687ffe9\" (UID: \"bc033b9f-32ee-44ed-85b1-4655c687ffe9\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620353 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0cb80edb-5104-4f39-b8a5-2c285bdc1ff1-operator-scripts\") pod \"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1\" (UID: \"0cb80edb-5104-4f39-b8a5-2c285bdc1ff1\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620373 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c71918d0-b384-4bf5-b8ee-a338ff72d9e9-operator-scripts\") pod \"c71918d0-b384-4bf5-b8ee-a338ff72d9e9\" (UID: \"c71918d0-b384-4bf5-b8ee-a338ff72d9e9\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620405 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c84xv\" (UniqueName: \"kubernetes.io/projected/a8f36d3f-f478-4067-b71b-c799da7e07d9-kube-api-access-c84xv\") pod \"a8f36d3f-f478-4067-b71b-c799da7e07d9\" (UID: \"a8f36d3f-f478-4067-b71b-c799da7e07d9\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620433 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fpwq\" (UniqueName: \"kubernetes.io/projected/a9340e06-3a50-4f01-9314-44e5786484e1-kube-api-access-9fpwq\") pod \"a9340e06-3a50-4f01-9314-44e5786484e1\" (UID: \"a9340e06-3a50-4f01-9314-44e5786484e1\") "
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620749 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3631751f-0878-4972-a191-ff026a644832-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.620762 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbdqz\" (UniqueName: \"kubernetes.io/projected/3631751f-0878-4972-a191-ff026a644832-kube-api-access-sbdqz\") on node \"crc\" DevicePath \"\""
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.621218 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8f36d3f-f478-4067-b71b-c799da7e07d9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a8f36d3f-f478-4067-b71b-c799da7e07d9" (UID: "a8f36d3f-f478-4067-b71b-c799da7e07d9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.621854 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9340e06-3a50-4f01-9314-44e5786484e1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a9340e06-3a50-4f01-9314-44e5786484e1" (UID: "a9340e06-3a50-4f01-9314-44e5786484e1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.623256 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cb80edb-5104-4f39-b8a5-2c285bdc1ff1-kube-api-access-dqvrw" (OuterVolumeSpecName: "kube-api-access-dqvrw") pod "0cb80edb-5104-4f39-b8a5-2c285bdc1ff1" (UID: "0cb80edb-5104-4f39-b8a5-2c285bdc1ff1"). InnerVolumeSpecName "kube-api-access-dqvrw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.623715 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9340e06-3a50-4f01-9314-44e5786484e1-kube-api-access-9fpwq" (OuterVolumeSpecName: "kube-api-access-9fpwq") pod "a9340e06-3a50-4f01-9314-44e5786484e1" (UID: "a9340e06-3a50-4f01-9314-44e5786484e1"). InnerVolumeSpecName "kube-api-access-9fpwq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.624166 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c71918d0-b384-4bf5-b8ee-a338ff72d9e9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c71918d0-b384-4bf5-b8ee-a338ff72d9e9" (UID: "c71918d0-b384-4bf5-b8ee-a338ff72d9e9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.624213 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc033b9f-32ee-44ed-85b1-4655c687ffe9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bc033b9f-32ee-44ed-85b1-4655c687ffe9" (UID: "bc033b9f-32ee-44ed-85b1-4655c687ffe9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.624405 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac" (UID: "4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.624504 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0cb80edb-5104-4f39-b8a5-2c285bdc1ff1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0cb80edb-5104-4f39-b8a5-2c285bdc1ff1" (UID: "0cb80edb-5104-4f39-b8a5-2c285bdc1ff1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.626574 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac-kube-api-access-695rq" (OuterVolumeSpecName: "kube-api-access-695rq") pod "4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac" (UID: "4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac"). InnerVolumeSpecName "kube-api-access-695rq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.626788 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8f36d3f-f478-4067-b71b-c799da7e07d9-kube-api-access-c84xv" (OuterVolumeSpecName: "kube-api-access-c84xv") pod "a8f36d3f-f478-4067-b71b-c799da7e07d9" (UID: "a8f36d3f-f478-4067-b71b-c799da7e07d9"). InnerVolumeSpecName "kube-api-access-c84xv".
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.627049 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c71918d0-b384-4bf5-b8ee-a338ff72d9e9-kube-api-access-4sfft" (OuterVolumeSpecName: "kube-api-access-4sfft") pod "c71918d0-b384-4bf5-b8ee-a338ff72d9e9" (UID: "c71918d0-b384-4bf5-b8ee-a338ff72d9e9"). InnerVolumeSpecName "kube-api-access-4sfft". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.629719 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc033b9f-32ee-44ed-85b1-4655c687ffe9-kube-api-access-q4jvl" (OuterVolumeSpecName: "kube-api-access-q4jvl") pod "bc033b9f-32ee-44ed-85b1-4655c687ffe9" (UID: "bc033b9f-32ee-44ed-85b1-4655c687ffe9"). InnerVolumeSpecName "kube-api-access-q4jvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.724370 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4jvl\" (UniqueName: \"kubernetes.io/projected/bc033b9f-32ee-44ed-85b1-4655c687ffe9-kube-api-access-q4jvl\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.724421 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4sfft\" (UniqueName: \"kubernetes.io/projected/c71918d0-b384-4bf5-b8ee-a338ff72d9e9-kube-api-access-4sfft\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.724432 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqvrw\" (UniqueName: \"kubernetes.io/projected/0cb80edb-5104-4f39-b8a5-2c285bdc1ff1-kube-api-access-dqvrw\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.724442 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-695rq\" (UniqueName: \"kubernetes.io/projected/4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac-kube-api-access-695rq\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.724455 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9340e06-3a50-4f01-9314-44e5786484e1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.724466 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.724476 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc033b9f-32ee-44ed-85b1-4655c687ffe9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.724496 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0cb80edb-5104-4f39-b8a5-2c285bdc1ff1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.724505 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c71918d0-b384-4bf5-b8ee-a338ff72d9e9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.724514 4814 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c84xv\" (UniqueName: \"kubernetes.io/projected/a8f36d3f-f478-4067-b71b-c799da7e07d9-kube-api-access-c84xv\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.724526 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fpwq\" (UniqueName: \"kubernetes.io/projected/a9340e06-3a50-4f01-9314-44e5786484e1-kube-api-access-9fpwq\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:45 crc kubenswrapper[4814]: I0122 05:35:45.724536 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8f36d3f-f478-4067-b71b-c799da7e07d9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:46 crc kubenswrapper[4814]: I0122 05:35:46.408312 4814 generic.go:334] "Generic (PLEG): container finished" podID="65c32786-ef8f-4498-aaef-4ec1dcebc57d" containerID="15ffb9cdfb3e5de75db0fc8522561149f16fc5f39a57f4270f3be516a8641af3" exitCode=0 Jan 22 05:35:46 crc kubenswrapper[4814]: I0122 05:35:46.409017 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-st6qn" event={"ID":"65c32786-ef8f-4498-aaef-4ec1dcebc57d","Type":"ContainerDied","Data":"15ffb9cdfb3e5de75db0fc8522561149f16fc5f39a57f4270f3be516a8641af3"} Jan 22 05:35:46 crc kubenswrapper[4814]: I0122 05:35:46.413360 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-vb7mm" Jan 22 05:35:46 crc kubenswrapper[4814]: I0122 05:35:46.417911 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-snjhg" Jan 22 05:35:46 crc kubenswrapper[4814]: I0122 05:35:46.453176 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-4d21-account-create-update-vf2t4" Jan 22 05:35:46 crc kubenswrapper[4814]: I0122 05:35:46.453255 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-81fe-account-create-update-pqrz5" Jan 22 05:35:46 crc kubenswrapper[4814]: I0122 05:35:46.454263 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-1385-account-create-update-nqh2m" Jan 22 05:35:46 crc kubenswrapper[4814]: I0122 05:35:46.454482 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-zjkzp" Jan 22 05:35:46 crc kubenswrapper[4814]: I0122 05:35:46.455221 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-9bcb-account-create-update-vvs4l" Jan 22 05:35:46 crc kubenswrapper[4814]: I0122 05:35:46.455888 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-4lt4r" event={"ID":"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979","Type":"ContainerStarted","Data":"326e264611b4534e81b504b48543ea7a88696075c43b9f4c3e976f8940bdbc8e"} Jan 22 05:35:46 crc kubenswrapper[4814]: I0122 05:35:46.489197 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-4lt4r" podStartSLOduration=2.763710801 podStartE2EDuration="9.489176489s" podCreationTimestamp="2026-01-22 05:35:37 +0000 UTC" firstStartedPulling="2026-01-22 05:35:38.539829612 +0000 UTC m=+1024.623317827" lastFinishedPulling="2026-01-22 05:35:45.26529528 +0000 UTC m=+1031.348783515" observedRunningTime="2026-01-22 05:35:46.484127702 +0000 UTC m=+1032.567615917" watchObservedRunningTime="2026-01-22 05:35:46.489176489 +0000 UTC m=+1032.572664704" Jan 22 05:35:46 crc kubenswrapper[4814]: I0122 05:35:46.949771 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:35:47 crc kubenswrapper[4814]: I0122 05:35:47.003526 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-tkzhv"] Jan 22 05:35:47 crc kubenswrapper[4814]: I0122 05:35:47.003774 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-tkzhv" podUID="a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" containerName="dnsmasq-dns" containerID="cri-o://6bc0e39c263a87bf5acd852a15127c3e30522ce1b8d70bc9c8199022bad57eaf" gracePeriod=10 Jan 22 05:35:47 crc kubenswrapper[4814]: I0122 05:35:47.437503 4814 generic.go:334] "Generic (PLEG): container finished" podID="a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" containerID="6bc0e39c263a87bf5acd852a15127c3e30522ce1b8d70bc9c8199022bad57eaf" exitCode=0 Jan 22 05:35:47 crc kubenswrapper[4814]: I0122 05:35:47.443572 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-tkzhv" event={"ID":"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33","Type":"ContainerDied","Data":"6bc0e39c263a87bf5acd852a15127c3e30522ce1b8d70bc9c8199022bad57eaf"} Jan 22 05:35:47 crc kubenswrapper[4814]: I0122 05:35:47.443649 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-tkzhv" event={"ID":"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33","Type":"ContainerDied","Data":"7794a3572cb345dbd00530047c42b03588e32254130b496d3628695fc4871686"} Jan 22 05:35:47 crc kubenswrapper[4814]: I0122 05:35:47.443662 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7794a3572cb345dbd00530047c42b03588e32254130b496d3628695fc4871686" Jan 22 05:35:47 crc kubenswrapper[4814]: I0122 05:35:47.473478 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:47 crc kubenswrapper[4814]: I0122 05:35:47.585725 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-ovsdbserver-nb\") pod \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " Jan 22 05:35:47 crc kubenswrapper[4814]: I0122 05:35:47.585838 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bkzxh\" (UniqueName: \"kubernetes.io/projected/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-kube-api-access-bkzxh\") pod \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " Jan 22 05:35:47 crc kubenswrapper[4814]: I0122 05:35:47.585936 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-config\") pod \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " Jan 22 05:35:47 crc kubenswrapper[4814]: I0122 05:35:47.585956 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-ovsdbserver-sb\") pod \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " Jan 22 05:35:47 crc kubenswrapper[4814]: I0122 05:35:47.585981 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-dns-svc\") pod \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\" (UID: \"a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33\") " Jan 22 05:35:47 crc kubenswrapper[4814]: I0122 05:35:47.600854 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-kube-api-access-bkzxh" (OuterVolumeSpecName: "kube-api-access-bkzxh") pod "a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" (UID: "a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33"). InnerVolumeSpecName "kube-api-access-bkzxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.650500 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" (UID: "a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.663372 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-config" (OuterVolumeSpecName: "config") pod "a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" (UID: "a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.673025 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" (UID: "a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.687837 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bkzxh\" (UniqueName: \"kubernetes.io/projected/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-kube-api-access-bkzxh\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.687868 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.687877 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.687886 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.706935 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" (UID: "a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.772585 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.788826 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.890245 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-combined-ca-bundle\") pod \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.890314 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-config-data\") pod \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.890394 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlnl6\" (UniqueName: \"kubernetes.io/projected/65c32786-ef8f-4498-aaef-4ec1dcebc57d-kube-api-access-mlnl6\") pod \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.890448 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-db-sync-config-data\") pod \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\" (UID: \"65c32786-ef8f-4498-aaef-4ec1dcebc57d\") " Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.894902 4814 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "65c32786-ef8f-4498-aaef-4ec1dcebc57d" (UID: "65c32786-ef8f-4498-aaef-4ec1dcebc57d"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.897093 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65c32786-ef8f-4498-aaef-4ec1dcebc57d-kube-api-access-mlnl6" (OuterVolumeSpecName: "kube-api-access-mlnl6") pod "65c32786-ef8f-4498-aaef-4ec1dcebc57d" (UID: "65c32786-ef8f-4498-aaef-4ec1dcebc57d"). InnerVolumeSpecName "kube-api-access-mlnl6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.915860 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "65c32786-ef8f-4498-aaef-4ec1dcebc57d" (UID: "65c32786-ef8f-4498-aaef-4ec1dcebc57d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.934554 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-config-data" (OuterVolumeSpecName: "config-data") pod "65c32786-ef8f-4498-aaef-4ec1dcebc57d" (UID: "65c32786-ef8f-4498-aaef-4ec1dcebc57d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.991600 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.991647 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.991659 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlnl6\" (UniqueName: \"kubernetes.io/projected/65c32786-ef8f-4498-aaef-4ec1dcebc57d-kube-api-access-mlnl6\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:47.991669 4814 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/65c32786-ef8f-4498-aaef-4ec1dcebc57d-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.451201 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-tkzhv" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.451214 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-st6qn" event={"ID":"65c32786-ef8f-4498-aaef-4ec1dcebc57d","Type":"ContainerDied","Data":"1a831592faed72613c9ad4cb5cb5d8de16350987d6a9ca28338deffd776134f1"} Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.451268 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a831592faed72613c9ad4cb5cb5d8de16350987d6a9ca28338deffd776134f1" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.451206 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-st6qn" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.484022 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-tkzhv"] Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.493961 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-tkzhv"] Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.850502 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-tl768"] Jan 22 05:35:48 crc kubenswrapper[4814]: E0122 05:35:48.851097 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c71918d0-b384-4bf5-b8ee-a338ff72d9e9" containerName="mariadb-database-create" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851117 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c71918d0-b384-4bf5-b8ee-a338ff72d9e9" containerName="mariadb-database-create" Jan 22 05:35:48 crc kubenswrapper[4814]: E0122 05:35:48.851129 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac" containerName="mariadb-database-create" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851137 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac" containerName="mariadb-database-create" Jan 22 05:35:48 crc kubenswrapper[4814]: E0122 05:35:48.851152 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9340e06-3a50-4f01-9314-44e5786484e1" containerName="mariadb-account-create-update" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851166 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9340e06-3a50-4f01-9314-44e5786484e1" containerName="mariadb-account-create-update" Jan 22 05:35:48 crc kubenswrapper[4814]: E0122 05:35:48.851180 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8f36d3f-f478-4067-b71b-c799da7e07d9" containerName="mariadb-account-create-update" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851188 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8f36d3f-f478-4067-b71b-c799da7e07d9" containerName="mariadb-account-create-update" Jan 22 05:35:48 crc kubenswrapper[4814]: E0122 05:35:48.851201 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65c32786-ef8f-4498-aaef-4ec1dcebc57d" containerName="glance-db-sync" Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851208 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="65c32786-ef8f-4498-aaef-4ec1dcebc57d" containerName="glance-db-sync" Jan 22 05:35:48 crc kubenswrapper[4814]: E0122 05:35:48.851218 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="729bdb41-4e49-4df6-a581-87fb0db6f3a0" containerName="mariadb-account-create-update" 
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851225 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="729bdb41-4e49-4df6-a581-87fb0db6f3a0" containerName="mariadb-account-create-update"
Jan 22 05:35:48 crc kubenswrapper[4814]: E0122 05:35:48.851236 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cb80edb-5104-4f39-b8a5-2c285bdc1ff1" containerName="mariadb-account-create-update"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851244 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cb80edb-5104-4f39-b8a5-2c285bdc1ff1" containerName="mariadb-account-create-update"
Jan 22 05:35:48 crc kubenswrapper[4814]: E0122 05:35:48.851263 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc033b9f-32ee-44ed-85b1-4655c687ffe9" containerName="mariadb-database-create"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851271 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc033b9f-32ee-44ed-85b1-4655c687ffe9" containerName="mariadb-database-create"
Jan 22 05:35:48 crc kubenswrapper[4814]: E0122 05:35:48.851284 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9dc758e-58fd-4232-adf3-f9c9de238a9f" containerName="mariadb-database-create"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851292 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9dc758e-58fd-4232-adf3-f9c9de238a9f" containerName="mariadb-database-create"
Jan 22 05:35:48 crc kubenswrapper[4814]: E0122 05:35:48.851304 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" containerName="init"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851311 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" containerName="init"
Jan 22 05:35:48 crc kubenswrapper[4814]: E0122 05:35:48.851325 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" containerName="dnsmasq-dns"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851333 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" containerName="dnsmasq-dns"
Jan 22 05:35:48 crc kubenswrapper[4814]: E0122 05:35:48.851346 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3631751f-0878-4972-a191-ff026a644832" containerName="mariadb-account-create-update"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851354 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="3631751f-0878-4972-a191-ff026a644832" containerName="mariadb-account-create-update"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851529 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="c71918d0-b384-4bf5-b8ee-a338ff72d9e9" containerName="mariadb-database-create"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851546 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="65c32786-ef8f-4498-aaef-4ec1dcebc57d" containerName="glance-db-sync"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851558 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="3631751f-0878-4972-a191-ff026a644832" containerName="mariadb-account-create-update"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851572 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" containerName="dnsmasq-dns"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851595 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac" containerName="mariadb-database-create"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851607 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="729bdb41-4e49-4df6-a581-87fb0db6f3a0" containerName="mariadb-account-create-update"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851620 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9340e06-3a50-4f01-9314-44e5786484e1" containerName="mariadb-account-create-update"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851668 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9dc758e-58fd-4232-adf3-f9c9de238a9f" containerName="mariadb-database-create"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851681 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc033b9f-32ee-44ed-85b1-4655c687ffe9" containerName="mariadb-database-create"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851694 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8f36d3f-f478-4067-b71b-c799da7e07d9" containerName="mariadb-account-create-update"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.851706 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cb80edb-5104-4f39-b8a5-2c285bdc1ff1" containerName="mariadb-account-create-update"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.852731 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:48 crc kubenswrapper[4814]: I0122 05:35:48.866874 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-tl768"]
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.047185 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fd6hj\" (UniqueName: \"kubernetes.io/projected/a997be63-4fac-43b7-878c-a80813dc26a2-kube-api-access-fd6hj\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.047248 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-ovsdbserver-sb\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.047346 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-config\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.047391 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-ovsdbserver-nb\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.047461 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-dns-svc\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.047514 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-dns-swift-storage-0\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.149494 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-dns-svc\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.149544 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-dns-swift-storage-0\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.149608 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fd6hj\" (UniqueName: \"kubernetes.io/projected/a997be63-4fac-43b7-878c-a80813dc26a2-kube-api-access-fd6hj\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.149653 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-ovsdbserver-sb\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.149675 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-config\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.149691 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-ovsdbserver-nb\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.150457 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-ovsdbserver-nb\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.150518 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-dns-svc\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.151014 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-ovsdbserver-sb\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.151380 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-dns-swift-storage-0\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.155226 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-config\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.193845 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fd6hj\" (UniqueName: \"kubernetes.io/projected/a997be63-4fac-43b7-878c-a80813dc26a2-kube-api-access-fd6hj\") pod \"dnsmasq-dns-7ff5475cc9-tl768\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") " pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.489453 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:49 crc kubenswrapper[4814]: I0122 05:35:49.940753 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-tl768"]
Jan 22 05:35:50 crc kubenswrapper[4814]: I0122 05:35:50.352338 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33" path="/var/lib/kubelet/pods/a0c9ee0b-dd3a-4bd0-abc7-162fdc023f33/volumes"
Jan 22 05:35:50 crc kubenswrapper[4814]: I0122 05:35:50.466017 4814 generic.go:334] "Generic (PLEG): container finished" podID="b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979" containerID="326e264611b4534e81b504b48543ea7a88696075c43b9f4c3e976f8940bdbc8e" exitCode=0
Jan 22 05:35:50 crc kubenswrapper[4814]: I0122 05:35:50.466088 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-4lt4r" event={"ID":"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979","Type":"ContainerDied","Data":"326e264611b4534e81b504b48543ea7a88696075c43b9f4c3e976f8940bdbc8e"}
Jan 22 05:35:50 crc kubenswrapper[4814]: I0122 05:35:50.467529 4814 generic.go:334] "Generic (PLEG): container finished" podID="a997be63-4fac-43b7-878c-a80813dc26a2" containerID="bb687e7d594c8a066415547f8679806a3c23051bd626789613f0eedd70170f08" exitCode=0
Jan 22 05:35:50 crc kubenswrapper[4814]: I0122 05:35:50.467590 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-tl768" event={"ID":"a997be63-4fac-43b7-878c-a80813dc26a2","Type":"ContainerDied","Data":"bb687e7d594c8a066415547f8679806a3c23051bd626789613f0eedd70170f08"}
Jan 22 05:35:50 crc kubenswrapper[4814]: I0122 05:35:50.467672 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-tl768" event={"ID":"a997be63-4fac-43b7-878c-a80813dc26a2","Type":"ContainerStarted","Data":"effd115331a05a4bca53aedfd516aac10b9e219aeb3f1ce7ea5f4dcd36b528b2"}
Jan 22 05:35:51 crc kubenswrapper[4814]: I0122 05:35:51.479028 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-tl768" event={"ID":"a997be63-4fac-43b7-878c-a80813dc26a2","Type":"ContainerStarted","Data":"bea8affc38c7df03c5514136447d0174a0de65f0f091a1b72fa1011e3abe080c"}
Jan 22 05:35:51 crc kubenswrapper[4814]: I0122 05:35:51.518651 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7ff5475cc9-tl768" podStartSLOduration=3.518603836 podStartE2EDuration="3.518603836s" podCreationTimestamp="2026-01-22 05:35:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:35:51.501766424 +0000 UTC m=+1037.585254679" watchObservedRunningTime="2026-01-22 05:35:51.518603836 +0000 UTC m=+1037.602092061"
Jan 22 05:35:51 crc kubenswrapper[4814]: I0122 05:35:51.838727 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-4lt4r"
Jan 22 05:35:51 crc kubenswrapper[4814]: I0122 05:35:51.997706 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcjbl\" (UniqueName: \"kubernetes.io/projected/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-kube-api-access-dcjbl\") pod \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\" (UID: \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\") "
Jan 22 05:35:51 crc kubenswrapper[4814]: I0122 05:35:51.998005 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-combined-ca-bundle\") pod \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\" (UID: \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\") "
Jan 22 05:35:51 crc kubenswrapper[4814]: I0122 05:35:51.998101 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-config-data\") pod \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\" (UID: \"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979\") "
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.007809 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-kube-api-access-dcjbl" (OuterVolumeSpecName: "kube-api-access-dcjbl") pod "b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979" (UID: "b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979"). InnerVolumeSpecName "kube-api-access-dcjbl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.083802 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-config-data" (OuterVolumeSpecName: "config-data") pod "b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979" (UID: "b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.090706 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979" (UID: "b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.099751 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcjbl\" (UniqueName: \"kubernetes.io/projected/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-kube-api-access-dcjbl\") on node \"crc\" DevicePath \"\""
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.099776 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.099786 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.488379 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-4lt4r"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.488381 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-4lt4r" event={"ID":"b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979","Type":"ContainerDied","Data":"eb38b678a909fd884d4297ac53906a5701383c8ba9666bb968cbaa2a56fd58f9"}
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.488446 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb38b678a909fd884d4297ac53906a5701383c8ba9666bb968cbaa2a56fd58f9"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.489768 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.723387 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-tl768"]
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.753942 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-6gbx9"]
Jan 22 05:35:52 crc kubenswrapper[4814]: E0122 05:35:52.754376 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979" containerName="keystone-db-sync"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.754400 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979" containerName="keystone-db-sync"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.754610 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979" containerName="keystone-db-sync"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.755237 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.757825 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.758095 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8jbhz"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.759419 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.759599 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.769677 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.796305 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-6gbx9"]
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.812100 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"]
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.813424 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.839846 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"]
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.913471 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-979kc\" (UniqueName: \"kubernetes.io/projected/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-kube-api-access-979kc\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.913529 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-fernet-keys\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.913552 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-scripts\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.913566 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-credential-keys\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.913587 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-ovsdbserver-sb\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.913610 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-config-data\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.913647 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-ovsdbserver-nb\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.913677 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-combined-ca-bundle\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.913699 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-config\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.913727 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjbng\" (UniqueName: \"kubernetes.io/projected/d939d7b8-86dd-4adb-a891-92f42dd4a29c-kube-api-access-hjbng\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.913746 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-dns-svc\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.913793 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-dns-swift-storage-0\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.978497 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-n8qnk"]
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.979467 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-n8qnk"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.989879 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-ffkm2"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.993466 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data"
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.997003 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-mk8qf"]
Jan 22 05:35:52 crc kubenswrapper[4814]: I0122 05:35:52.997986 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-mk8qf"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.019236 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-fernet-keys\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.019271 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-scripts\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.019289 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-credential-keys\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.019314 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-ovsdbserver-sb\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.019337 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-config-data\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.019359 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-ovsdbserver-nb\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.019386 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-combined-ca-bundle\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.019406 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-config\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.019436 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjbng\" (UniqueName: \"kubernetes.io/projected/d939d7b8-86dd-4adb-a891-92f42dd4a29c-kube-api-access-hjbng\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.019458 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-dns-svc\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.019502 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-dns-swift-storage-0\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.019536 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-979kc\" (UniqueName: \"kubernetes.io/projected/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-kube-api-access-979kc\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.020186 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-n8qnk"]
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.020381 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-ovsdbserver-nb\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.021750 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-ovsdbserver-sb\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.021767 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-dns-svc\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.022478 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-dns-swift-storage-0\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.023963 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-config\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.027146 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-fernet-keys\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9"
Jan 22 05:35:53 crc kubenswrapper[4814]:
I0122 05:35:53.027197 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-combined-ca-bundle\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.030897 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-scripts\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.033203 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-config-data\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.037015 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-b7c9r" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.037251 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.050017 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-credential-keys\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.050293 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.073480 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-mk8qf"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.122409 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-db-sync-config-data\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.122488 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-config-data\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.122551 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcwpn\" (UniqueName: \"kubernetes.io/projected/6af8690c-751e-4196-b6f4-db21950c5ec7-kube-api-access-vcwpn\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.122571 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-scripts\") pod 
\"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.122609 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-combined-ca-bundle\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.122651 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bhfz\" (UniqueName: \"kubernetes.io/projected/81f9b3f5-db07-49ef-933f-ef90f1c017f6-kube-api-access-8bhfz\") pod \"heat-db-sync-n8qnk\" (UID: \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\") " pod="openstack/heat-db-sync-n8qnk" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.122677 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81f9b3f5-db07-49ef-933f-ef90f1c017f6-config-data\") pod \"heat-db-sync-n8qnk\" (UID: \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\") " pod="openstack/heat-db-sync-n8qnk" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.122696 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9b3f5-db07-49ef-933f-ef90f1c017f6-combined-ca-bundle\") pod \"heat-db-sync-n8qnk\" (UID: \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\") " pod="openstack/heat-db-sync-n8qnk" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.122727 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6af8690c-751e-4196-b6f4-db21950c5ec7-etc-machine-id\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.183889 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-fd796464f-s7864"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.194657 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-979kc\" (UniqueName: \"kubernetes.io/projected/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-kube-api-access-979kc\") pod \"dnsmasq-dns-5c5cc7c5ff-vhgn8\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.195672 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.199606 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjbng\" (UniqueName: \"kubernetes.io/projected/d939d7b8-86dd-4adb-a891-92f42dd4a29c-kube-api-access-hjbng\") pod \"keystone-bootstrap-6gbx9\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " pod="openstack/keystone-bootstrap-6gbx9" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.207217 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.207495 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-snnzg" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.208157 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.208240 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.224212 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-config-data\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.224281 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcwpn\" (UniqueName: \"kubernetes.io/projected/6af8690c-751e-4196-b6f4-db21950c5ec7-kube-api-access-vcwpn\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.224305 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-scripts\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.224338 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-combined-ca-bundle\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.224360 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bhfz\" (UniqueName: \"kubernetes.io/projected/81f9b3f5-db07-49ef-933f-ef90f1c017f6-kube-api-access-8bhfz\") pod \"heat-db-sync-n8qnk\" (UID: \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\") " pod="openstack/heat-db-sync-n8qnk" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.224381 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81f9b3f5-db07-49ef-933f-ef90f1c017f6-config-data\") pod \"heat-db-sync-n8qnk\" (UID: \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\") " pod="openstack/heat-db-sync-n8qnk" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.224400 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/81f9b3f5-db07-49ef-933f-ef90f1c017f6-combined-ca-bundle\") pod \"heat-db-sync-n8qnk\" (UID: \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\") " pod="openstack/heat-db-sync-n8qnk" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.224436 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6af8690c-751e-4196-b6f4-db21950c5ec7-etc-machine-id\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.224472 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-db-sync-config-data\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.244010 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6af8690c-751e-4196-b6f4-db21950c5ec7-etc-machine-id\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.244619 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-db-sync-config-data\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.245962 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-fd796464f-s7864"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.254243 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-combined-ca-bundle\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.261021 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-scripts\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.268141 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81f9b3f5-db07-49ef-933f-ef90f1c017f6-config-data\") pod \"heat-db-sync-n8qnk\" (UID: \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\") " pod="openstack/heat-db-sync-n8qnk" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.275339 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9b3f5-db07-49ef-933f-ef90f1c017f6-combined-ca-bundle\") pod \"heat-db-sync-n8qnk\" (UID: \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\") " pod="openstack/heat-db-sync-n8qnk" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.287881 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-config-data\") pod \"cinder-db-sync-mk8qf\" (UID: 
\"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.305686 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-rpl5x"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.307210 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rpl5x" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.315375 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bhfz\" (UniqueName: \"kubernetes.io/projected/81f9b3f5-db07-49ef-933f-ef90f1c017f6-kube-api-access-8bhfz\") pod \"heat-db-sync-n8qnk\" (UID: \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\") " pod="openstack/heat-db-sync-n8qnk" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.327285 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8514d901-6322-4fb5-ad48-5c925db3b7de-config-data\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.327356 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8514d901-6322-4fb5-ad48-5c925db3b7de-logs\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.327398 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8t2d\" (UniqueName: \"kubernetes.io/projected/8514d901-6322-4fb5-ad48-5c925db3b7de-kube-api-access-x8t2d\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.327438 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8514d901-6322-4fb5-ad48-5c925db3b7de-scripts\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.327509 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8514d901-6322-4fb5-ad48-5c925db3b7de-horizon-secret-key\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.330212 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.330416 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.330553 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-n6pdm" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.338942 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-rpl5x"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.355058 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-vcwpn\" (UniqueName: \"kubernetes.io/projected/6af8690c-751e-4196-b6f4-db21950c5ec7-kube-api-access-vcwpn\") pod \"cinder-db-sync-mk8qf\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.382550 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-6gbx9" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.430038 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8514d901-6322-4fb5-ad48-5c925db3b7de-logs\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.430100 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wkqg\" (UniqueName: \"kubernetes.io/projected/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-kube-api-access-6wkqg\") pod \"neutron-db-sync-rpl5x\" (UID: \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\") " pod="openstack/neutron-db-sync-rpl5x" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.430143 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8t2d\" (UniqueName: \"kubernetes.io/projected/8514d901-6322-4fb5-ad48-5c925db3b7de-kube-api-access-x8t2d\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.430188 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8514d901-6322-4fb5-ad48-5c925db3b7de-scripts\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.430253 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-config\") pod \"neutron-db-sync-rpl5x\" (UID: \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\") " pod="openstack/neutron-db-sync-rpl5x" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.430275 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8514d901-6322-4fb5-ad48-5c925db3b7de-horizon-secret-key\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.430298 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-combined-ca-bundle\") pod \"neutron-db-sync-rpl5x\" (UID: \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\") " pod="openstack/neutron-db-sync-rpl5x" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.430359 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8514d901-6322-4fb5-ad48-5c925db3b7de-config-data\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.431446 4814 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8514d901-6322-4fb5-ad48-5c925db3b7de-config-data\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.434219 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.435581 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8514d901-6322-4fb5-ad48-5c925db3b7de-logs\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.436895 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8514d901-6322-4fb5-ad48-5c925db3b7de-scripts\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.441912 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8514d901-6322-4fb5-ad48-5c925db3b7de-horizon-secret-key\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.479048 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8t2d\" (UniqueName: \"kubernetes.io/projected/8514d901-6322-4fb5-ad48-5c925db3b7de-kube-api-access-x8t2d\") pod \"horizon-fd796464f-s7864\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.533510 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-config\") pod \"neutron-db-sync-rpl5x\" (UID: \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\") " pod="openstack/neutron-db-sync-rpl5x" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.533567 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-combined-ca-bundle\") pod \"neutron-db-sync-rpl5x\" (UID: \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\") " pod="openstack/neutron-db-sync-rpl5x" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.533669 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wkqg\" (UniqueName: \"kubernetes.io/projected/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-kube-api-access-6wkqg\") pod \"neutron-db-sync-rpl5x\" (UID: \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\") " pod="openstack/neutron-db-sync-rpl5x" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.538751 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-config\") pod \"neutron-db-sync-rpl5x\" (UID: \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\") " pod="openstack/neutron-db-sync-rpl5x" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.545170 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-combined-ca-bundle\") pod \"neutron-db-sync-rpl5x\" (UID: \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\") " pod="openstack/neutron-db-sync-rpl5x" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.547847 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-768fd6449-lhdq7"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.549137 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.593921 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-n8qnk" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.594519 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.596778 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.605185 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.605434 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.622376 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wkqg\" (UniqueName: \"kubernetes.io/projected/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-kube-api-access-6wkqg\") pod \"neutron-db-sync-rpl5x\" (UID: \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\") " pod="openstack/neutron-db-sync-rpl5x" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.624101 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.639350 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d30d0a83-bcbe-4915-ba04-dbbeee62022a-horizon-secret-key\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.639579 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d30d0a83-bcbe-4915-ba04-dbbeee62022a-config-data\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.639598 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phn6r\" (UniqueName: \"kubernetes.io/projected/d30d0a83-bcbe-4915-ba04-dbbeee62022a-kube-api-access-phn6r\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.639619 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d30d0a83-bcbe-4915-ba04-dbbeee62022a-logs\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.639670 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d30d0a83-bcbe-4915-ba04-dbbeee62022a-scripts\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.648669 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-768fd6449-lhdq7"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.668865 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.683463 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-vd5qz"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.688145 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vd5qz" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.695015 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.695261 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.695673 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-r9hqf" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.701660 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-rfq2m"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.702621 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-rfq2m" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.713951 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-dvlml" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.714151 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.735803 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-fd796464f-s7864" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.749655 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd231434-0d02-4b13-9a72-c31277deeacf-run-httpd\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.749694 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-config-data\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.749724 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.749748 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d30d0a83-bcbe-4915-ba04-dbbeee62022a-config-data\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.749765 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phn6r\" (UniqueName: \"kubernetes.io/projected/d30d0a83-bcbe-4915-ba04-dbbeee62022a-kube-api-access-phn6r\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.749785 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d30d0a83-bcbe-4915-ba04-dbbeee62022a-logs\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.749816 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d30d0a83-bcbe-4915-ba04-dbbeee62022a-scripts\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.749848 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d30d0a83-bcbe-4915-ba04-dbbeee62022a-horizon-secret-key\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " 
pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.749871 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22ff8\" (UniqueName: \"kubernetes.io/projected/bd231434-0d02-4b13-9a72-c31277deeacf-kube-api-access-22ff8\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.749894 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-scripts\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.749918 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd231434-0d02-4b13-9a72-c31277deeacf-log-httpd\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.749941 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.751100 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d30d0a83-bcbe-4915-ba04-dbbeee62022a-config-data\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.751523 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d30d0a83-bcbe-4915-ba04-dbbeee62022a-logs\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.751980 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d30d0a83-bcbe-4915-ba04-dbbeee62022a-scripts\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.753501 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-rpl5x" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.759878 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.781563 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vd5qz"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.801852 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d30d0a83-bcbe-4915-ba04-dbbeee62022a-horizon-secret-key\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.808470 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phn6r\" (UniqueName: \"kubernetes.io/projected/d30d0a83-bcbe-4915-ba04-dbbeee62022a-kube-api-access-phn6r\") pod \"horizon-768fd6449-lhdq7\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.832689 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-nzwkx"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.834001 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.850954 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.852228 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.854860 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-config-data\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.854902 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.854950 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w78nl\" (UniqueName: \"kubernetes.io/projected/66536b14-9f47-4fe1-bc77-583a4ffff700-kube-api-access-w78nl\") pod \"barbican-db-sync-rfq2m\" (UID: \"66536b14-9f47-4fe1-bc77-583a4ffff700\") " pod="openstack/barbican-db-sync-rfq2m" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.854973 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/66536b14-9f47-4fe1-bc77-583a4ffff700-db-sync-config-data\") pod \"barbican-db-sync-rfq2m\" (UID: \"66536b14-9f47-4fe1-bc77-583a4ffff700\") " pod="openstack/barbican-db-sync-rfq2m" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.855014 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-config-data\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.855031 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-scripts\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.855048 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-combined-ca-bundle\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.855087 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22ff8\" (UniqueName: \"kubernetes.io/projected/bd231434-0d02-4b13-9a72-c31277deeacf-kube-api-access-22ff8\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.855110 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-scripts\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.855134 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd231434-0d02-4b13-9a72-c31277deeacf-log-httpd\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.855153 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.855170 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66536b14-9f47-4fe1-bc77-583a4ffff700-combined-ca-bundle\") pod \"barbican-db-sync-rfq2m\" (UID: \"66536b14-9f47-4fe1-bc77-583a4ffff700\") " pod="openstack/barbican-db-sync-rfq2m" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.855206 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/458612b9-c230-4db2-82d3-0a1b8fbe81f1-logs\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.855237 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdsgh\" (UniqueName: \"kubernetes.io/projected/458612b9-c230-4db2-82d3-0a1b8fbe81f1-kube-api-access-zdsgh\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " 
pod="openstack/placement-db-sync-vd5qz" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.855270 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd231434-0d02-4b13-9a72-c31277deeacf-run-httpd\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.856930 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.857116 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-jnz8l" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.857269 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.857758 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.880090 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd231434-0d02-4b13-9a72-c31277deeacf-log-httpd\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.880347 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.898572 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd231434-0d02-4b13-9a72-c31277deeacf-run-httpd\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.898933 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.913424 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-rfq2m"] Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.944174 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22ff8\" (UniqueName: \"kubernetes.io/projected/bd231434-0d02-4b13-9a72-c31277deeacf-kube-api-access-22ff8\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.951666 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-config-data\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.952821 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.962314 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-scripts\") pod \"ceilometer-0\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " pod="openstack/ceilometer-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.963333 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-scripts\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.963373 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-combined-ca-bundle\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.963404 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.963445 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.963470 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" Jan 
22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.963516 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b811225b-766e-41ce-a810-bbf95e5f4e3b-logs\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.963556 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66536b14-9f47-4fe1-bc77-583a4ffff700-combined-ca-bundle\") pod \"barbican-db-sync-rfq2m\" (UID: \"66536b14-9f47-4fe1-bc77-583a4ffff700\") " pod="openstack/barbican-db-sync-rfq2m" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.963595 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-config\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.963645 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/458612b9-c230-4db2-82d3-0a1b8fbe81f1-logs\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.963683 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.971913 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-scripts\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.977725 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdsgh\" (UniqueName: \"kubernetes.io/projected/458612b9-c230-4db2-82d3-0a1b8fbe81f1-kube-api-access-zdsgh\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.977802 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-scripts\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.977855 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.977886 4814 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-config-data\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.977921 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqh8r\" (UniqueName: \"kubernetes.io/projected/b811225b-766e-41ce-a810-bbf95e5f4e3b-kube-api-access-mqh8r\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.977944 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z527t\" (UniqueName: \"kubernetes.io/projected/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-kube-api-access-z527t\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.977969 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w78nl\" (UniqueName: \"kubernetes.io/projected/66536b14-9f47-4fe1-bc77-583a4ffff700-kube-api-access-w78nl\") pod \"barbican-db-sync-rfq2m\" (UID: \"66536b14-9f47-4fe1-bc77-583a4ffff700\") " pod="openstack/barbican-db-sync-rfq2m" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.978013 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/66536b14-9f47-4fe1-bc77-583a4ffff700-db-sync-config-data\") pod \"barbican-db-sync-rfq2m\" (UID: \"66536b14-9f47-4fe1-bc77-583a4ffff700\") " pod="openstack/barbican-db-sync-rfq2m" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.978064 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b811225b-766e-41ce-a810-bbf95e5f4e3b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.978083 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.978104 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.978141 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-config-data\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz" Jan 22 05:35:53 crc 
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.979217 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-nzwkx"]
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.995036 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/66536b14-9f47-4fe1-bc77-583a4ffff700-db-sync-config-data\") pod \"barbican-db-sync-rfq2m\" (UID: \"66536b14-9f47-4fe1-bc77-583a4ffff700\") " pod="openstack/barbican-db-sync-rfq2m"
Jan 22 05:35:53 crc kubenswrapper[4814]: I0122 05:35:53.998934 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/458612b9-c230-4db2-82d3-0a1b8fbe81f1-logs\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.005165 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-combined-ca-bundle\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.006866 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66536b14-9f47-4fe1-bc77-583a4ffff700-combined-ca-bundle\") pod \"barbican-db-sync-rfq2m\" (UID: \"66536b14-9f47-4fe1-bc77-583a4ffff700\") " pod="openstack/barbican-db-sync-rfq2m"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.007869 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-config-data\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.027722 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.073098 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdsgh\" (UniqueName: \"kubernetes.io/projected/458612b9-c230-4db2-82d3-0a1b8fbe81f1-kube-api-access-zdsgh\") pod \"placement-db-sync-vd5qz\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " pod="openstack/placement-db-sync-vd5qz"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.073375 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w78nl\" (UniqueName: \"kubernetes.io/projected/66536b14-9f47-4fe1-bc77-583a4ffff700-kube-api-access-w78nl\") pod \"barbican-db-sync-rfq2m\" (UID: \"66536b14-9f47-4fe1-bc77-583a4ffff700\") " pod="openstack/barbican-db-sync-rfq2m"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086380 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086424 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-scripts\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086452 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086478 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-config-data\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086497 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqh8r\" (UniqueName: \"kubernetes.io/projected/b811225b-766e-41ce-a810-bbf95e5f4e3b-kube-api-access-mqh8r\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086521 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z527t\" (UniqueName: \"kubernetes.io/projected/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-kube-api-access-z527t\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086551 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b811225b-766e-41ce-a810-bbf95e5f4e3b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086569 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086586 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086612 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086652 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086687 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086714 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b811225b-766e-41ce-a810-bbf95e5f4e3b-logs\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.086751 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-config\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.087533 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-config\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.090060 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b811225b-766e-41ce-a810-bbf95e5f4e3b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.091696 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.101048 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.101318 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b811225b-766e-41ce-a810-bbf95e5f4e3b-logs\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.101596 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.101891 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.102138 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.102392 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.104346 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-config-data\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.110072 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.110538 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-scripts\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.128295 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z527t\" (UniqueName: \"kubernetes.io/projected/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-kube-api-access-z527t\") pod \"dnsmasq-dns-8b5c85b87-nzwkx\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.147934 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqh8r\" (UniqueName: \"kubernetes.io/projected/b811225b-766e-41ce-a810-bbf95e5f4e3b-kube-api-access-mqh8r\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.174981 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx"
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.272563 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " pod="openstack/glance-default-external-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.310263 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.340053 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.342445 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vd5qz" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.363159 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.366202 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-rfq2m" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.375845 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.470708 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b01d155-d948-46cb-b5b7-e510da29c9a0-logs\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.470762 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.470781 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.470810 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.470860 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mmxj\" (UniqueName: \"kubernetes.io/projected/4b01d155-d948-46cb-b5b7-e510da29c9a0-kube-api-access-6mmxj\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: 
I0122 05:35:54.470875 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4b01d155-d948-46cb-b5b7-e510da29c9a0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.470911 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.470938 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.474789 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.510120 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.572281 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b01d155-d948-46cb-b5b7-e510da29c9a0-logs\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.572341 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.572361 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.572391 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.572446 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mmxj\" (UniqueName: \"kubernetes.io/projected/4b01d155-d948-46cb-b5b7-e510da29c9a0-kube-api-access-6mmxj\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.572462 4814 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4b01d155-d948-46cb-b5b7-e510da29c9a0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.572499 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.572516 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.581180 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.581440 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b01d155-d948-46cb-b5b7-e510da29c9a0-logs\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.582517 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4b01d155-d948-46cb-b5b7-e510da29c9a0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.582608 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.586249 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.587870 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7ff5475cc9-tl768" podUID="a997be63-4fac-43b7-878c-a80813dc26a2" containerName="dnsmasq-dns" containerID="cri-o://bea8affc38c7df03c5514136447d0174a0de65f0f091a1b72fa1011e3abe080c" gracePeriod=10 Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.588720 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-scripts\") pod 
\"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.589171 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.624417 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mmxj\" (UniqueName: \"kubernetes.io/projected/4b01d155-d948-46cb-b5b7-e510da29c9a0-kube-api-access-6mmxj\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.689161 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.723755 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.732546 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-6gbx9"] Jan 22 05:35:54 crc kubenswrapper[4814]: I0122 05:35:54.756855 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"] Jan 22 05:35:54 crc kubenswrapper[4814]: W0122 05:35:54.828965 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8cdd96e_dfa9_4364_bed4_f7fd50d10fd4.slice/crio-fbbcc07239cb0dd20b185976f7e3c2179353da98433c20efa83eacf5a0adc580 WatchSource:0}: Error finding container fbbcc07239cb0dd20b185976f7e3c2179353da98433c20efa83eacf5a0adc580: Status 404 returned error can't find the container with id fbbcc07239cb0dd20b185976f7e3c2179353da98433c20efa83eacf5a0adc580 Jan 22 05:35:54 crc kubenswrapper[4814]: W0122 05:35:54.845953 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd939d7b8_86dd_4adb_a891_92f42dd4a29c.slice/crio-cb6a7649b6f6b2155a9d0297d3b2e0c6938804c1fc40abc6f057d13a0edf5f5a WatchSource:0}: Error finding container cb6a7649b6f6b2155a9d0297d3b2e0c6938804c1fc40abc6f057d13a0edf5f5a: Status 404 returned error can't find the container with id cb6a7649b6f6b2155a9d0297d3b2e0c6938804c1fc40abc6f057d13a0edf5f5a Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.054822 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-mk8qf"] Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.058880 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-n8qnk"] Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.284584 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-fd796464f-s7864"] Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.478489 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-nzwkx"] Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.518021 4814 
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.518021 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-vd5qz"]
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.537298 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-rpl5x"]
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.549497 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.570602 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-rfq2m"]
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.623814 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-mk8qf" event={"ID":"6af8690c-751e-4196-b6f4-db21950c5ec7","Type":"ContainerStarted","Data":"83f451f60aecdd9e05226492e9df2bc9c22cda5197b753fae2a9eded0a67df76"}
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.637975 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-n8qnk" event={"ID":"81f9b3f5-db07-49ef-933f-ef90f1c017f6","Type":"ContainerStarted","Data":"309f30eb3ec2d57289e1532f93129bcbb0e9921deae5b72eebfe67ca7739fed2"}
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.639460 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6gbx9" event={"ID":"d939d7b8-86dd-4adb-a891-92f42dd4a29c","Type":"ContainerStarted","Data":"cb6a7649b6f6b2155a9d0297d3b2e0c6938804c1fc40abc6f057d13a0edf5f5a"}
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.641599 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vd5qz" event={"ID":"458612b9-c230-4db2-82d3-0a1b8fbe81f1","Type":"ContainerStarted","Data":"e4d4d5bab8fb53cf3251c5fde6a37fa497a2b849e55fa0d5ba8567c28399c67e"}
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.643152 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd231434-0d02-4b13-9a72-c31277deeacf","Type":"ContainerStarted","Data":"d3cfb9f7029afb5c97fa0777215f7e30f0dc39f7aced1acd835af2b0af7dd763"}
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.652519 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-rfq2m" event={"ID":"66536b14-9f47-4fe1-bc77-583a4ffff700","Type":"ContainerStarted","Data":"ed1a4542df49686e3ea3e305cfa24d4f33973ceadc34a65f26cee75557ed7a03"}
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.671251 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rpl5x" event={"ID":"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca","Type":"ContainerStarted","Data":"db18d922924f435348b22eb70d8dd6e11f79fa1a70028083cb6af2410f62b225"}
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.675249 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8" event={"ID":"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4","Type":"ContainerStarted","Data":"fbbcc07239cb0dd20b185976f7e3c2179353da98433c20efa83eacf5a0adc580"}
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.682706 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fd796464f-s7864" event={"ID":"8514d901-6322-4fb5-ad48-5c925db3b7de","Type":"ContainerStarted","Data":"0fef9e0b3bc10692bcabfc69d05bc39f4f5b4952b27b170257a6395d94250646"}
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.694863 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" event={"ID":"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d","Type":"ContainerStarted","Data":"a178339288e62da6744186708dbb26a2d407dc5bef5466e7d5f6f9601e676ef0"}
Jan 22 05:35:55 crc kubenswrapper[4814]: I0122 05:35:55.804618 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.085869 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.452952 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-768fd6449-lhdq7"]
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.554780 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7ff5475cc9-tl768"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.577209 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.617997 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-768fd6449-lhdq7"]
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.724454 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-8446cbc4d9-qf5ps"]
Jan 22 05:35:56 crc kubenswrapper[4814]: E0122 05:35:56.724868 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a997be63-4fac-43b7-878c-a80813dc26a2" containerName="dnsmasq-dns"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.724881 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a997be63-4fac-43b7-878c-a80813dc26a2" containerName="dnsmasq-dns"
Jan 22 05:35:56 crc kubenswrapper[4814]: E0122 05:35:56.724905 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a997be63-4fac-43b7-878c-a80813dc26a2" containerName="init"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.724913 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a997be63-4fac-43b7-878c-a80813dc26a2" containerName="init"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.725107 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a997be63-4fac-43b7-878c-a80813dc26a2" containerName="dnsmasq-dns"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.726145 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8446cbc4d9-qf5ps"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.740251 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8446cbc4d9-qf5ps"]
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.764338 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-ovsdbserver-sb\") pod \"a997be63-4fac-43b7-878c-a80813dc26a2\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") "
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.764446 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-dns-swift-storage-0\") pod \"a997be63-4fac-43b7-878c-a80813dc26a2\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") "
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.764482 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-config\") pod \"a997be63-4fac-43b7-878c-a80813dc26a2\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") "
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.764522 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-ovsdbserver-nb\") pod \"a997be63-4fac-43b7-878c-a80813dc26a2\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") "
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.764558 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fd6hj\" (UniqueName: \"kubernetes.io/projected/a997be63-4fac-43b7-878c-a80813dc26a2-kube-api-access-fd6hj\") pod \"a997be63-4fac-43b7-878c-a80813dc26a2\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") "
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.764583 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-dns-svc\") pod \"a997be63-4fac-43b7-878c-a80813dc26a2\" (UID: \"a997be63-4fac-43b7-878c-a80813dc26a2\") "
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.764869 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-scripts\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.764895 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-config-data\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.764922 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-horizon-secret-key\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.764961 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp7w8\" (UniqueName: \"kubernetes.io/projected/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-kube-api-access-rp7w8\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.764983 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-logs\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.779688 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.835398 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a997be63-4fac-43b7-878c-a80813dc26a2-kube-api-access-fd6hj" (OuterVolumeSpecName: "kube-api-access-fd6hj") pod "a997be63-4fac-43b7-878c-a80813dc26a2" (UID: "a997be63-4fac-43b7-878c-a80813dc26a2"). InnerVolumeSpecName "kube-api-access-fd6hj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.841031 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6gbx9" event={"ID":"d939d7b8-86dd-4adb-a891-92f42dd4a29c","Type":"ContainerStarted","Data":"4c739bed32d8de7c967b4984ad01401552654bcc8e3c75ddb35c78a22b2836f9"}
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.862283 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.877884 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp7w8\" (UniqueName: \"kubernetes.io/projected/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-kube-api-access-rp7w8\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.878008 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-logs\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.878283 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-scripts\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps"
Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.878356 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-config-data\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps"
\"kubernetes.io/secret/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-horizon-secret-key\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps" Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.878523 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fd6hj\" (UniqueName: \"kubernetes.io/projected/a997be63-4fac-43b7-878c-a80813dc26a2-kube-api-access-fd6hj\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.886133 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-logs\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps" Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.886861 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-scripts\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps" Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.888348 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-config-data\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps" Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.894688 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-6gbx9" podStartSLOduration=4.894669824 podStartE2EDuration="4.894669824s" podCreationTimestamp="2026-01-22 05:35:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:35:56.885597773 +0000 UTC m=+1042.969085988" watchObservedRunningTime="2026-01-22 05:35:56.894669824 +0000 UTC m=+1042.978158039" Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.900156 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-768fd6449-lhdq7" event={"ID":"d30d0a83-bcbe-4915-ba04-dbbeee62022a","Type":"ContainerStarted","Data":"997a5d64ab12ad468d1d1e73e9fe2a63d46d38592fc69cfe28bbfd2ba41556a3"} Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.928957 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b811225b-766e-41ce-a810-bbf95e5f4e3b","Type":"ContainerStarted","Data":"f8c98d3c6f12be47aec73ef8ebd6c66f88669df2534ff922aa3d1aab8c137c34"} Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.971081 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp7w8\" (UniqueName: \"kubernetes.io/projected/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-kube-api-access-rp7w8\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps" Jan 22 05:35:56 crc kubenswrapper[4814]: I0122 05:35:56.972595 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-horizon-secret-key\") pod \"horizon-8446cbc4d9-qf5ps\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " pod="openstack/horizon-8446cbc4d9-qf5ps" Jan 22 05:35:56 crc 
kubenswrapper[4814]: I0122 05:35:56.981368 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rpl5x" event={"ID":"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca","Type":"ContainerStarted","Data":"99c910021a2c2854670a7428a68423d92923a30fef890f0fb706885e74d6d973"} Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.020759 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a997be63-4fac-43b7-878c-a80813dc26a2" (UID: "a997be63-4fac-43b7-878c-a80813dc26a2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.028139 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-rpl5x" podStartSLOduration=4.02811348 podStartE2EDuration="4.02811348s" podCreationTimestamp="2026-01-22 05:35:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:35:57.018359617 +0000 UTC m=+1043.101847832" watchObservedRunningTime="2026-01-22 05:35:57.02811348 +0000 UTC m=+1043.111601715" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.029843 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a997be63-4fac-43b7-878c-a80813dc26a2" (UID: "a997be63-4fac-43b7-878c-a80813dc26a2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.036393 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-config" (OuterVolumeSpecName: "config") pod "a997be63-4fac-43b7-878c-a80813dc26a2" (UID: "a997be63-4fac-43b7-878c-a80813dc26a2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.039920 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4b01d155-d948-46cb-b5b7-e510da29c9a0","Type":"ContainerStarted","Data":"c3820d3b6495d87959334e35014b481897dbc05dbaebe632ad63c7d5a5b98ac4"} Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.040127 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a997be63-4fac-43b7-878c-a80813dc26a2" (UID: "a997be63-4fac-43b7-878c-a80813dc26a2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.052162 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a997be63-4fac-43b7-878c-a80813dc26a2" (UID: "a997be63-4fac-43b7-878c-a80813dc26a2"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.052567 4814 generic.go:334] "Generic (PLEG): container finished" podID="d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4" containerID="e9c970d025db9be9a963e0b1d24020ce1d4991866b7670461ddfe1794e1df212" exitCode=0 Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.052641 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8" event={"ID":"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4","Type":"ContainerDied","Data":"e9c970d025db9be9a963e0b1d24020ce1d4991866b7670461ddfe1794e1df212"} Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.086062 4814 generic.go:334] "Generic (PLEG): container finished" podID="c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" containerID="bc1a6f38726e7ad1797308a8a261f1f3b61931df828b6b03371101094e4c74e2" exitCode=0 Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.086127 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" event={"ID":"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d","Type":"ContainerDied","Data":"bc1a6f38726e7ad1797308a8a261f1f3b61931df828b6b03371101094e4c74e2"} Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.099519 4814 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.099553 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.099565 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.099576 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.099584 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a997be63-4fac-43b7-878c-a80813dc26a2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.132515 4814 generic.go:334] "Generic (PLEG): container finished" podID="a997be63-4fac-43b7-878c-a80813dc26a2" containerID="bea8affc38c7df03c5514136447d0174a0de65f0f091a1b72fa1011e3abe080c" exitCode=0 Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.132560 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-tl768" event={"ID":"a997be63-4fac-43b7-878c-a80813dc26a2","Type":"ContainerDied","Data":"bea8affc38c7df03c5514136447d0174a0de65f0f091a1b72fa1011e3abe080c"} Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.132592 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7ff5475cc9-tl768" event={"ID":"a997be63-4fac-43b7-878c-a80813dc26a2","Type":"ContainerDied","Data":"effd115331a05a4bca53aedfd516aac10b9e219aeb3f1ce7ea5f4dcd36b528b2"} Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.132619 4814 scope.go:117] "RemoveContainer" 
containerID="bea8affc38c7df03c5514136447d0174a0de65f0f091a1b72fa1011e3abe080c" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.132811 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7ff5475cc9-tl768" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.168543 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8446cbc4d9-qf5ps" Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.315681 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-tl768"] Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.326588 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7ff5475cc9-tl768"] Jan 22 05:35:57 crc kubenswrapper[4814]: I0122 05:35:57.605543 4814 scope.go:117] "RemoveContainer" containerID="bb687e7d594c8a066415547f8679806a3c23051bd626789613f0eedd70170f08" Jan 22 05:35:58 crc kubenswrapper[4814]: I0122 05:35:58.224091 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8446cbc4d9-qf5ps"] Jan 22 05:35:58 crc kubenswrapper[4814]: I0122 05:35:58.381002 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a997be63-4fac-43b7-878c-a80813dc26a2" path="/var/lib/kubelet/pods/a997be63-4fac-43b7-878c-a80813dc26a2/volumes" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.384641 4814 scope.go:117] "RemoveContainer" containerID="bea8affc38c7df03c5514136447d0174a0de65f0f091a1b72fa1011e3abe080c" Jan 22 05:35:59 crc kubenswrapper[4814]: E0122 05:35:59.387488 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bea8affc38c7df03c5514136447d0174a0de65f0f091a1b72fa1011e3abe080c\": container with ID starting with bea8affc38c7df03c5514136447d0174a0de65f0f091a1b72fa1011e3abe080c not found: ID does not exist" containerID="bea8affc38c7df03c5514136447d0174a0de65f0f091a1b72fa1011e3abe080c" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.387535 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bea8affc38c7df03c5514136447d0174a0de65f0f091a1b72fa1011e3abe080c"} err="failed to get container status \"bea8affc38c7df03c5514136447d0174a0de65f0f091a1b72fa1011e3abe080c\": rpc error: code = NotFound desc = could not find container \"bea8affc38c7df03c5514136447d0174a0de65f0f091a1b72fa1011e3abe080c\": container with ID starting with bea8affc38c7df03c5514136447d0174a0de65f0f091a1b72fa1011e3abe080c not found: ID does not exist" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.387565 4814 scope.go:117] "RemoveContainer" containerID="bb687e7d594c8a066415547f8679806a3c23051bd626789613f0eedd70170f08" Jan 22 05:35:59 crc kubenswrapper[4814]: W0122 05:35:59.397010 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeccfbe4e_3954_4ff0_964e_4dfbc753fdb1.slice/crio-08dde9579ce29c12212eb0fe745560b58e4ce8b88acff862ae0fcbdcba050f29 WatchSource:0}: Error finding container 08dde9579ce29c12212eb0fe745560b58e4ce8b88acff862ae0fcbdcba050f29: Status 404 returned error can't find the container with id 08dde9579ce29c12212eb0fe745560b58e4ce8b88acff862ae0fcbdcba050f29 Jan 22 05:35:59 crc kubenswrapper[4814]: E0122 05:35:59.398329 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"bb687e7d594c8a066415547f8679806a3c23051bd626789613f0eedd70170f08\": container with ID starting with bb687e7d594c8a066415547f8679806a3c23051bd626789613f0eedd70170f08 not found: ID does not exist" containerID="bb687e7d594c8a066415547f8679806a3c23051bd626789613f0eedd70170f08" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.398422 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb687e7d594c8a066415547f8679806a3c23051bd626789613f0eedd70170f08"} err="failed to get container status \"bb687e7d594c8a066415547f8679806a3c23051bd626789613f0eedd70170f08\": rpc error: code = NotFound desc = could not find container \"bb687e7d594c8a066415547f8679806a3c23051bd626789613f0eedd70170f08\": container with ID starting with bb687e7d594c8a066415547f8679806a3c23051bd626789613f0eedd70170f08 not found: ID does not exist" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.528050 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.590508 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-ovsdbserver-sb\") pod \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.590580 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-config\") pod \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.590689 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-979kc\" (UniqueName: \"kubernetes.io/projected/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-kube-api-access-979kc\") pod \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.590730 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-ovsdbserver-nb\") pod \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.590760 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-dns-svc\") pod \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.590793 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-dns-swift-storage-0\") pod \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\" (UID: \"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4\") " Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.618819 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-kube-api-access-979kc" (OuterVolumeSpecName: "kube-api-access-979kc") pod "d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4" (UID: "d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4"). 
InnerVolumeSpecName "kube-api-access-979kc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.639496 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4" (UID: "d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.653318 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4" (UID: "d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.657473 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4" (UID: "d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.678750 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4" (UID: "d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.685412 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-config" (OuterVolumeSpecName: "config") pod "d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4" (UID: "d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.695224 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.695249 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.695259 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-979kc\" (UniqueName: \"kubernetes.io/projected/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-kube-api-access-979kc\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.695270 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.695279 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:35:59 crc kubenswrapper[4814]: I0122 05:35:59.695288 4814 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:00 crc kubenswrapper[4814]: I0122 05:36:00.263512 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b811225b-766e-41ce-a810-bbf95e5f4e3b","Type":"ContainerStarted","Data":"d0463c0fc7cf743707a16a34781b69dc8fc0bd7c04e8c7e27040f198dcabd2c2"} Jan 22 05:36:00 crc kubenswrapper[4814]: I0122 05:36:00.273669 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4b01d155-d948-46cb-b5b7-e510da29c9a0","Type":"ContainerStarted","Data":"006cb23e2f9f73a1813492c70c246ea2f1fd0d26f2646106455aec21ff544b13"} Jan 22 05:36:00 crc kubenswrapper[4814]: I0122 05:36:00.285704 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8" event={"ID":"d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4","Type":"ContainerDied","Data":"fbbcc07239cb0dd20b185976f7e3c2179353da98433c20efa83eacf5a0adc580"} Jan 22 05:36:00 crc kubenswrapper[4814]: I0122 05:36:00.285752 4814 scope.go:117] "RemoveContainer" containerID="e9c970d025db9be9a963e0b1d24020ce1d4991866b7670461ddfe1794e1df212" Jan 22 05:36:00 crc kubenswrapper[4814]: I0122 05:36:00.285843 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8" Jan 22 05:36:00 crc kubenswrapper[4814]: I0122 05:36:00.298838 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" event={"ID":"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d","Type":"ContainerStarted","Data":"7508e78fc668fe22363adf5d95c259d6251ffb9356ef19b19c86116d46a0a671"} Jan 22 05:36:00 crc kubenswrapper[4814]: I0122 05:36:00.328590 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8446cbc4d9-qf5ps" event={"ID":"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1","Type":"ContainerStarted","Data":"08dde9579ce29c12212eb0fe745560b58e4ce8b88acff862ae0fcbdcba050f29"} Jan 22 05:36:00 crc kubenswrapper[4814]: I0122 05:36:00.359559 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" podStartSLOduration=7.359538613 podStartE2EDuration="7.359538613s" podCreationTimestamp="2026-01-22 05:35:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:36:00.343495755 +0000 UTC m=+1046.426983971" watchObservedRunningTime="2026-01-22 05:36:00.359538613 +0000 UTC m=+1046.443026828" Jan 22 05:36:00 crc kubenswrapper[4814]: I0122 05:36:00.429791 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"] Jan 22 05:36:00 crc kubenswrapper[4814]: I0122 05:36:00.440577 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c5cc7c5ff-vhgn8"] Jan 22 05:36:01 crc kubenswrapper[4814]: I0122 05:36:01.348812 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.356507 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4" path="/var/lib/kubelet/pods/d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4/volumes" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.364948 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4b01d155-d948-46cb-b5b7-e510da29c9a0","Type":"ContainerStarted","Data":"6b5399e2a4309766348db84dc35daf025b73a22c06c8d2bc3ccd95fc1cdd4241"} Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.365093 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="4b01d155-d948-46cb-b5b7-e510da29c9a0" containerName="glance-log" containerID="cri-o://006cb23e2f9f73a1813492c70c246ea2f1fd0d26f2646106455aec21ff544b13" gracePeriod=30 Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.365371 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="4b01d155-d948-46cb-b5b7-e510da29c9a0" containerName="glance-httpd" containerID="cri-o://6b5399e2a4309766348db84dc35daf025b73a22c06c8d2bc3ccd95fc1cdd4241" gracePeriod=30 Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.377686 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b811225b-766e-41ce-a810-bbf95e5f4e3b","Type":"ContainerStarted","Data":"861f79f543bab6d5fb64e96a82b7cb455ff121ba1b2400bc6046cbcc0f7a897e"} Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.377790 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" 
podUID="b811225b-766e-41ce-a810-bbf95e5f4e3b" containerName="glance-httpd" containerID="cri-o://861f79f543bab6d5fb64e96a82b7cb455ff121ba1b2400bc6046cbcc0f7a897e" gracePeriod=30 Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.377773 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="b811225b-766e-41ce-a810-bbf95e5f4e3b" containerName="glance-log" containerID="cri-o://d0463c0fc7cf743707a16a34781b69dc8fc0bd7c04e8c7e27040f198dcabd2c2" gracePeriod=30 Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.414493 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=9.414475268 podStartE2EDuration="9.414475268s" podCreationTimestamp="2026-01-22 05:35:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:36:02.414255211 +0000 UTC m=+1048.497743446" watchObservedRunningTime="2026-01-22 05:36:02.414475268 +0000 UTC m=+1048.497963473" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.703384 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=9.70336173 podStartE2EDuration="9.70336173s" podCreationTimestamp="2026-01-22 05:35:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:36:02.44455743 +0000 UTC m=+1048.528045645" watchObservedRunningTime="2026-01-22 05:36:02.70336173 +0000 UTC m=+1048.786849945" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.712253 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-fd796464f-s7864"] Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.763156 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5fc797bd5d-f6wlm"] Jan 22 05:36:02 crc kubenswrapper[4814]: E0122 05:36:02.763505 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4" containerName="init" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.763518 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4" containerName="init" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.763704 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8cdd96e-dfa9-4364-bed4-f7fd50d10fd4" containerName="init" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.764574 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.774435 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.777340 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fc797bd5d-f6wlm"] Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.826797 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8446cbc4d9-qf5ps"] Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.876133 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-combined-ca-bundle\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.876207 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/50923695-9bcc-49c5-844f-6275c99729e2-config-data\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.876266 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2jxg\" (UniqueName: \"kubernetes.io/projected/50923695-9bcc-49c5-844f-6275c99729e2-kube-api-access-h2jxg\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.876305 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-horizon-secret-key\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.876342 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/50923695-9bcc-49c5-844f-6275c99729e2-scripts\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.876389 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50923695-9bcc-49c5-844f-6275c99729e2-logs\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.876414 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-horizon-tls-certs\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.884807 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-75cf549f68-bs2gm"] Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.928168 
4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.942388 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-75cf549f68-bs2gm"] Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985236 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50923695-9bcc-49c5-844f-6275c99729e2-logs\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985281 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-combined-ca-bundle\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985303 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-horizon-tls-certs\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985342 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-combined-ca-bundle\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985372 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-scripts\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985391 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-config-data\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985413 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/50923695-9bcc-49c5-844f-6275c99729e2-config-data\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985436 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-logs\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985471 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2jxg\" (UniqueName: 
\"kubernetes.io/projected/50923695-9bcc-49c5-844f-6275c99729e2-kube-api-access-h2jxg\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985488 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-horizon-secret-key\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985518 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-horizon-secret-key\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985543 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/50923695-9bcc-49c5-844f-6275c99729e2-scripts\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985562 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8r94\" (UniqueName: \"kubernetes.io/projected/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-kube-api-access-w8r94\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.985579 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-horizon-tls-certs\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:02 crc kubenswrapper[4814]: I0122 05:36:02.986720 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50923695-9bcc-49c5-844f-6275c99729e2-logs\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.001044 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/50923695-9bcc-49c5-844f-6275c99729e2-config-data\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.007280 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-horizon-secret-key\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.007819 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2jxg\" (UniqueName: \"kubernetes.io/projected/50923695-9bcc-49c5-844f-6275c99729e2-kube-api-access-h2jxg\") pod 
\"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.008463 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/50923695-9bcc-49c5-844f-6275c99729e2-scripts\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.026418 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-horizon-tls-certs\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.039020 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-combined-ca-bundle\") pod \"horizon-5fc797bd5d-f6wlm\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.090589 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-combined-ca-bundle\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.090692 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-scripts\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.090720 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-config-data\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.090752 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-logs\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.090787 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-horizon-secret-key\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.091463 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-scripts\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.091591 4814 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8r94\" (UniqueName: \"kubernetes.io/projected/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-kube-api-access-w8r94\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.091617 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-horizon-tls-certs\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.094489 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-logs\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.097725 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-config-data\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.099403 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.101545 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-combined-ca-bundle\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.104488 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-horizon-tls-certs\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.113950 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-horizon-secret-key\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.121417 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8r94\" (UniqueName: \"kubernetes.io/projected/d7073bb4-1466-4fe6-bb49-f91bbee77dbd-kube-api-access-w8r94\") pod \"horizon-75cf549f68-bs2gm\" (UID: \"d7073bb4-1466-4fe6-bb49-f91bbee77dbd\") " pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.284329 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.397863 4814 generic.go:334] "Generic (PLEG): container finished" podID="b811225b-766e-41ce-a810-bbf95e5f4e3b" containerID="861f79f543bab6d5fb64e96a82b7cb455ff121ba1b2400bc6046cbcc0f7a897e" exitCode=0 Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.397894 4814 generic.go:334] "Generic (PLEG): container finished" podID="b811225b-766e-41ce-a810-bbf95e5f4e3b" containerID="d0463c0fc7cf743707a16a34781b69dc8fc0bd7c04e8c7e27040f198dcabd2c2" exitCode=143 Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.397958 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b811225b-766e-41ce-a810-bbf95e5f4e3b","Type":"ContainerDied","Data":"861f79f543bab6d5fb64e96a82b7cb455ff121ba1b2400bc6046cbcc0f7a897e"} Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.397985 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b811225b-766e-41ce-a810-bbf95e5f4e3b","Type":"ContainerDied","Data":"d0463c0fc7cf743707a16a34781b69dc8fc0bd7c04e8c7e27040f198dcabd2c2"} Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.424843 4814 generic.go:334] "Generic (PLEG): container finished" podID="4b01d155-d948-46cb-b5b7-e510da29c9a0" containerID="6b5399e2a4309766348db84dc35daf025b73a22c06c8d2bc3ccd95fc1cdd4241" exitCode=0 Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.424880 4814 generic.go:334] "Generic (PLEG): container finished" podID="4b01d155-d948-46cb-b5b7-e510da29c9a0" containerID="006cb23e2f9f73a1813492c70c246ea2f1fd0d26f2646106455aec21ff544b13" exitCode=143 Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.424901 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4b01d155-d948-46cb-b5b7-e510da29c9a0","Type":"ContainerDied","Data":"6b5399e2a4309766348db84dc35daf025b73a22c06c8d2bc3ccd95fc1cdd4241"} Jan 22 05:36:03 crc kubenswrapper[4814]: I0122 05:36:03.424927 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4b01d155-d948-46cb-b5b7-e510da29c9a0","Type":"ContainerDied","Data":"006cb23e2f9f73a1813492c70c246ea2f1fd0d26f2646106455aec21ff544b13"} Jan 22 05:36:04 crc kubenswrapper[4814]: I0122 05:36:04.177830 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" Jan 22 05:36:04 crc kubenswrapper[4814]: I0122 05:36:04.254073 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-6rwmc"] Jan 22 05:36:04 crc kubenswrapper[4814]: I0122 05:36:04.254390 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" podUID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerName="dnsmasq-dns" containerID="cri-o://888e29db900467c06d89a2ca16d7477bfcd2f0e54110ca90eae2b05a4f535deb" gracePeriod=10 Jan 22 05:36:04 crc kubenswrapper[4814]: I0122 05:36:04.446676 4814 generic.go:334] "Generic (PLEG): container finished" podID="d939d7b8-86dd-4adb-a891-92f42dd4a29c" containerID="4c739bed32d8de7c967b4984ad01401552654bcc8e3c75ddb35c78a22b2836f9" exitCode=0 Jan 22 05:36:04 crc kubenswrapper[4814]: I0122 05:36:04.446760 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6gbx9" 
event={"ID":"d939d7b8-86dd-4adb-a891-92f42dd4a29c","Type":"ContainerDied","Data":"4c739bed32d8de7c967b4984ad01401552654bcc8e3c75ddb35c78a22b2836f9"} Jan 22 05:36:04 crc kubenswrapper[4814]: I0122 05:36:04.487774 4814 generic.go:334] "Generic (PLEG): container finished" podID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerID="888e29db900467c06d89a2ca16d7477bfcd2f0e54110ca90eae2b05a4f535deb" exitCode=0 Jan 22 05:36:04 crc kubenswrapper[4814]: I0122 05:36:04.487829 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" event={"ID":"02fc56a5-86a0-4983-8219-0c0f4f220b7b","Type":"ContainerDied","Data":"888e29db900467c06d89a2ca16d7477bfcd2f0e54110ca90eae2b05a4f535deb"} Jan 22 05:36:06 crc kubenswrapper[4814]: I0122 05:36:06.948967 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" podUID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: connect: connection refused" Jan 22 05:36:11 crc kubenswrapper[4814]: I0122 05:36:11.948257 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" podUID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: connect: connection refused" Jan 22 05:36:15 crc kubenswrapper[4814]: E0122 05:36:15.941595 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Jan 22 05:36:15 crc kubenswrapper[4814]: E0122 05:36:15.942371 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n9dh66bh696hf6h5dhffh5f9h59ch68dhdfh97h5d6hf7h8dh7chbh56fh584h5f9hbch7fh698h5c7h68bh5f4h549h57ch68bh5f6h98h5d7h65fq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rp7w8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-8446cbc4d9-qf5ps_openstack(eccfbe4e-3954-4ff0-964e-4dfbc753fdb1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:36:15 crc kubenswrapper[4814]: E0122 05:36:15.947377 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-8446cbc4d9-qf5ps" podUID="eccfbe4e-3954-4ff0-964e-4dfbc753fdb1" Jan 22 05:36:16 crc kubenswrapper[4814]: I0122 05:36:16.950218 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" podUID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: connect: connection refused" Jan 22 05:36:16 crc kubenswrapper[4814]: I0122 05:36:16.950699 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:36:20 crc kubenswrapper[4814]: E0122 05:36:20.147382 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified" Jan 22 05:36:20 crc kubenswrapper[4814]: E0122 05:36:20.148201 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir 
/etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8bhfz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-n8qnk_openstack(81f9b3f5-db07-49ef-933f-ef90f1c017f6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:36:20 crc kubenswrapper[4814]: E0122 05:36:20.149482 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-n8qnk" podUID="81f9b3f5-db07-49ef-933f-ef90f1c017f6" Jan 22 05:36:20 crc kubenswrapper[4814]: E0122 05:36:20.701656 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified\\\"\"" pod="openstack/heat-db-sync-n8qnk" podUID="81f9b3f5-db07-49ef-933f-ef90f1c017f6" Jan 22 05:36:21 crc kubenswrapper[4814]: I0122 05:36:21.710929 4814 generic.go:334] "Generic (PLEG): container finished" podID="6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca" containerID="99c910021a2c2854670a7428a68423d92923a30fef890f0fb706885e74d6d973" exitCode=0 Jan 22 05:36:21 crc kubenswrapper[4814]: I0122 05:36:21.711018 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rpl5x" event={"ID":"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca","Type":"ContainerDied","Data":"99c910021a2c2854670a7428a68423d92923a30fef890f0fb706885e74d6d973"} Jan 22 05:36:21 crc kubenswrapper[4814]: E0122 05:36:21.829053 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Jan 22 05:36:21 crc kubenswrapper[4814]: E0122 05:36:21.829184 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5f7h55fh585h54bh668h5f4h5cch56dh5d8hdhch65bh65ch5b6h57h5dfhcbhd4h597h56dh5bdh677hc6h4h589h58h5cdh667hfbh5bfh587h564q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-phn6r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-768fd6449-lhdq7_openstack(d30d0a83-bcbe-4915-ba04-dbbeee62022a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:36:21 crc kubenswrapper[4814]: E0122 05:36:21.833680 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-768fd6449-lhdq7" podUID="d30d0a83-bcbe-4915-ba04-dbbeee62022a" Jan 22 05:36:21 crc kubenswrapper[4814]: E0122 05:36:21.846394 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Jan 22 05:36:21 crc kubenswrapper[4814]: E0122 05:36:21.846510 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5f9h697h5c6h58fh5bdh5bfh698h684h5fch5d6h79h5f7h85h5d4h655h65hd9h64fh67bh648h654h569h65dh698h679h95hf8h57ch5fch6h56h5f5q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x8t2d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-fd796464f-s7864_openstack(8514d901-6322-4fb5-ad48-5c925db3b7de): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:36:21 crc kubenswrapper[4814]: E0122 05:36:21.849536 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-fd796464f-s7864" podUID="8514d901-6322-4fb5-ad48-5c925db3b7de" Jan 22 05:36:21 crc kubenswrapper[4814]: I0122 05:36:21.899546 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-6gbx9" Jan 22 05:36:21 crc kubenswrapper[4814]: I0122 05:36:21.949381 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" podUID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: connect: connection refused" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.010926 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-config-data\") pod \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.011040 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-credential-keys\") pod \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.011080 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-scripts\") pod \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.011138 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-fernet-keys\") pod \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.011163 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-combined-ca-bundle\") pod \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.011224 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjbng\" (UniqueName: \"kubernetes.io/projected/d939d7b8-86dd-4adb-a891-92f42dd4a29c-kube-api-access-hjbng\") pod \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\" (UID: \"d939d7b8-86dd-4adb-a891-92f42dd4a29c\") " Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.019334 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d939d7b8-86dd-4adb-a891-92f42dd4a29c-kube-api-access-hjbng" (OuterVolumeSpecName: "kube-api-access-hjbng") pod "d939d7b8-86dd-4adb-a891-92f42dd4a29c" (UID: "d939d7b8-86dd-4adb-a891-92f42dd4a29c"). InnerVolumeSpecName "kube-api-access-hjbng". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.034031 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-scripts" (OuterVolumeSpecName: "scripts") pod "d939d7b8-86dd-4adb-a891-92f42dd4a29c" (UID: "d939d7b8-86dd-4adb-a891-92f42dd4a29c"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.033974 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d939d7b8-86dd-4adb-a891-92f42dd4a29c" (UID: "d939d7b8-86dd-4adb-a891-92f42dd4a29c"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.041584 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "d939d7b8-86dd-4adb-a891-92f42dd4a29c" (UID: "d939d7b8-86dd-4adb-a891-92f42dd4a29c"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.073765 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d939d7b8-86dd-4adb-a891-92f42dd4a29c" (UID: "d939d7b8-86dd-4adb-a891-92f42dd4a29c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.074865 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-config-data" (OuterVolumeSpecName: "config-data") pod "d939d7b8-86dd-4adb-a891-92f42dd4a29c" (UID: "d939d7b8-86dd-4adb-a891-92f42dd4a29c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.120797 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjbng\" (UniqueName: \"kubernetes.io/projected/d939d7b8-86dd-4adb-a891-92f42dd4a29c-kube-api-access-hjbng\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.120823 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.120835 4814 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.120845 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.120853 4814 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.120860 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d939d7b8-86dd-4adb-a891-92f42dd4a29c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.719847 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-6gbx9" Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.720792 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-6gbx9" event={"ID":"d939d7b8-86dd-4adb-a891-92f42dd4a29c","Type":"ContainerDied","Data":"cb6a7649b6f6b2155a9d0297d3b2e0c6938804c1fc40abc6f057d13a0edf5f5a"} Jan 22 05:36:22 crc kubenswrapper[4814]: I0122 05:36:22.720823 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb6a7649b6f6b2155a9d0297d3b2e0c6938804c1fc40abc6f057d13a0edf5f5a" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.082049 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-6gbx9"] Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.091851 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-6gbx9"] Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.199532 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-fvkql"] Jan 22 05:36:23 crc kubenswrapper[4814]: E0122 05:36:23.199999 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d939d7b8-86dd-4adb-a891-92f42dd4a29c" containerName="keystone-bootstrap" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.200019 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d939d7b8-86dd-4adb-a891-92f42dd4a29c" containerName="keystone-bootstrap" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.200252 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="d939d7b8-86dd-4adb-a891-92f42dd4a29c" containerName="keystone-bootstrap" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.200932 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.206406 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.207713 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-fvkql"] Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.208375 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8jbhz" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.208750 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.208913 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.208994 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.247173 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-scripts\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.247453 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f627g\" (UniqueName: \"kubernetes.io/projected/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-kube-api-access-f627g\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.247586 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-config-data\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.247879 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-fernet-keys\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.248016 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-credential-keys\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.248135 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-combined-ca-bundle\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.349802 4814 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-scripts\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.349854 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f627g\" (UniqueName: \"kubernetes.io/projected/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-kube-api-access-f627g\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.349911 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-config-data\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.350052 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-fernet-keys\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.350094 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-credential-keys\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.350123 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-combined-ca-bundle\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.356754 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-fernet-keys\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.358693 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-scripts\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.363209 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-credential-keys\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.363909 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-combined-ca-bundle\") pod \"keystone-bootstrap-fvkql\" (UID: 
\"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.368843 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-config-data\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.372259 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f627g\" (UniqueName: \"kubernetes.io/projected/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-kube-api-access-f627g\") pod \"keystone-bootstrap-fvkql\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:23 crc kubenswrapper[4814]: I0122 05:36:23.515861 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:24 crc kubenswrapper[4814]: I0122 05:36:24.374202 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d939d7b8-86dd-4adb-a891-92f42dd4a29c" path="/var/lib/kubelet/pods/d939d7b8-86dd-4adb-a891-92f42dd4a29c/volumes" Jan 22 05:36:24 crc kubenswrapper[4814]: I0122 05:36:24.517389 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 05:36:24 crc kubenswrapper[4814]: I0122 05:36:24.517462 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 05:36:24 crc kubenswrapper[4814]: I0122 05:36:24.725332 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:24 crc kubenswrapper[4814]: I0122 05:36:24.725405 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:31 crc kubenswrapper[4814]: I0122 05:36:31.949745 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" podUID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: i/o timeout" Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.749121 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-8446cbc4d9-qf5ps" Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.778492 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rp7w8\" (UniqueName: \"kubernetes.io/projected/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-kube-api-access-rp7w8\") pod \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.778685 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-config-data\") pod \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.778726 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-horizon-secret-key\") pod \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.778853 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-scripts\") pod \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.778927 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-logs\") pod \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\" (UID: \"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1\") " Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.779704 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-config-data" (OuterVolumeSpecName: "config-data") pod "eccfbe4e-3954-4ff0-964e-4dfbc753fdb1" (UID: "eccfbe4e-3954-4ff0-964e-4dfbc753fdb1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.780362 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-logs" (OuterVolumeSpecName: "logs") pod "eccfbe4e-3954-4ff0-964e-4dfbc753fdb1" (UID: "eccfbe4e-3954-4ff0-964e-4dfbc753fdb1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.780604 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-scripts" (OuterVolumeSpecName: "scripts") pod "eccfbe4e-3954-4ff0-964e-4dfbc753fdb1" (UID: "eccfbe4e-3954-4ff0-964e-4dfbc753fdb1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.784487 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "eccfbe4e-3954-4ff0-964e-4dfbc753fdb1" (UID: "eccfbe4e-3954-4ff0-964e-4dfbc753fdb1"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.793470 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-kube-api-access-rp7w8" (OuterVolumeSpecName: "kube-api-access-rp7w8") pod "eccfbe4e-3954-4ff0-964e-4dfbc753fdb1" (UID: "eccfbe4e-3954-4ff0-964e-4dfbc753fdb1"). InnerVolumeSpecName "kube-api-access-rp7w8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.827921 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8446cbc4d9-qf5ps" event={"ID":"eccfbe4e-3954-4ff0-964e-4dfbc753fdb1","Type":"ContainerDied","Data":"08dde9579ce29c12212eb0fe745560b58e4ce8b88acff862ae0fcbdcba050f29"} Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.827983 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8446cbc4d9-qf5ps" Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.887349 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.887403 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rp7w8\" (UniqueName: \"kubernetes.io/projected/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-kube-api-access-rp7w8\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.887424 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.887494 4814 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.887523 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.911340 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8446cbc4d9-qf5ps"] Jan 22 05:36:34 crc kubenswrapper[4814]: I0122 05:36:34.920014 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-8446cbc4d9-qf5ps"] Jan 22 05:36:35 crc kubenswrapper[4814]: E0122 05:36:35.252229 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Jan 22 05:36:35 crc kubenswrapper[4814]: E0122 05:36:35.252446 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w78nl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-rfq2m_openstack(66536b14-9f47-4fe1-bc77-583a4ffff700): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:36:35 crc kubenswrapper[4814]: E0122 05:36:35.253655 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-rfq2m" podUID="66536b14-9f47-4fe1-bc77-583a4ffff700" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.337217 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.369281 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.412683 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.429411 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-fd796464f-s7864" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.429549 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.437540 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-rpl5x" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.507425 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-ovsdbserver-nb\") pod \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.507488 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-dns-swift-storage-0\") pod \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.507557 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d30d0a83-bcbe-4915-ba04-dbbeee62022a-scripts\") pod \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.507591 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-config\") pod \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.507674 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d30d0a83-bcbe-4915-ba04-dbbeee62022a-horizon-secret-key\") pod \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.507709 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-ovsdbserver-sb\") pod \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.507725 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d30d0a83-bcbe-4915-ba04-dbbeee62022a-config-data\") pod \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.507771 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-dns-svc\") pod \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.507787 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d30d0a83-bcbe-4915-ba04-dbbeee62022a-logs\") pod \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\" (UID: \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.507813 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phn6r\" (UniqueName: \"kubernetes.io/projected/d30d0a83-bcbe-4915-ba04-dbbeee62022a-kube-api-access-phn6r\") pod \"d30d0a83-bcbe-4915-ba04-dbbeee62022a\" (UID: 
\"d30d0a83-bcbe-4915-ba04-dbbeee62022a\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.507830 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tp72b\" (UniqueName: \"kubernetes.io/projected/02fc56a5-86a0-4983-8219-0c0f4f220b7b-kube-api-access-tp72b\") pod \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\" (UID: \"02fc56a5-86a0-4983-8219-0c0f4f220b7b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.509467 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d30d0a83-bcbe-4915-ba04-dbbeee62022a-config-data" (OuterVolumeSpecName: "config-data") pod "d30d0a83-bcbe-4915-ba04-dbbeee62022a" (UID: "d30d0a83-bcbe-4915-ba04-dbbeee62022a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.509949 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d30d0a83-bcbe-4915-ba04-dbbeee62022a-logs" (OuterVolumeSpecName: "logs") pod "d30d0a83-bcbe-4915-ba04-dbbeee62022a" (UID: "d30d0a83-bcbe-4915-ba04-dbbeee62022a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.514829 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02fc56a5-86a0-4983-8219-0c0f4f220b7b-kube-api-access-tp72b" (OuterVolumeSpecName: "kube-api-access-tp72b") pod "02fc56a5-86a0-4983-8219-0c0f4f220b7b" (UID: "02fc56a5-86a0-4983-8219-0c0f4f220b7b"). InnerVolumeSpecName "kube-api-access-tp72b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.515409 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d30d0a83-bcbe-4915-ba04-dbbeee62022a-scripts" (OuterVolumeSpecName: "scripts") pod "d30d0a83-bcbe-4915-ba04-dbbeee62022a" (UID: "d30d0a83-bcbe-4915-ba04-dbbeee62022a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.515784 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d30d0a83-bcbe-4915-ba04-dbbeee62022a-kube-api-access-phn6r" (OuterVolumeSpecName: "kube-api-access-phn6r") pod "d30d0a83-bcbe-4915-ba04-dbbeee62022a" (UID: "d30d0a83-bcbe-4915-ba04-dbbeee62022a"). InnerVolumeSpecName "kube-api-access-phn6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.529799 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d30d0a83-bcbe-4915-ba04-dbbeee62022a-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "d30d0a83-bcbe-4915-ba04-dbbeee62022a" (UID: "d30d0a83-bcbe-4915-ba04-dbbeee62022a"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.551468 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "02fc56a5-86a0-4983-8219-0c0f4f220b7b" (UID: "02fc56a5-86a0-4983-8219-0c0f4f220b7b"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.553117 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "02fc56a5-86a0-4983-8219-0c0f4f220b7b" (UID: "02fc56a5-86a0-4983-8219-0c0f4f220b7b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.560234 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-config" (OuterVolumeSpecName: "config") pod "02fc56a5-86a0-4983-8219-0c0f4f220b7b" (UID: "02fc56a5-86a0-4983-8219-0c0f4f220b7b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.561980 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "02fc56a5-86a0-4983-8219-0c0f4f220b7b" (UID: "02fc56a5-86a0-4983-8219-0c0f4f220b7b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.562597 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "02fc56a5-86a0-4983-8219-0c0f4f220b7b" (UID: "02fc56a5-86a0-4983-8219-0c0f4f220b7b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.608738 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8514d901-6322-4fb5-ad48-5c925db3b7de-horizon-secret-key\") pod \"8514d901-6322-4fb5-ad48-5c925db3b7de\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.608795 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b811225b-766e-41ce-a810-bbf95e5f4e3b-httpd-run\") pod \"b811225b-766e-41ce-a810-bbf95e5f4e3b\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.608819 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8514d901-6322-4fb5-ad48-5c925db3b7de-config-data\") pod \"8514d901-6322-4fb5-ad48-5c925db3b7de\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.608843 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"b811225b-766e-41ce-a810-bbf95e5f4e3b\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.608880 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8514d901-6322-4fb5-ad48-5c925db3b7de-scripts\") pod \"8514d901-6322-4fb5-ad48-5c925db3b7de\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 
05:36:35.608896 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-scripts\") pod \"b811225b-766e-41ce-a810-bbf95e5f4e3b\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.608924 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqh8r\" (UniqueName: \"kubernetes.io/projected/b811225b-766e-41ce-a810-bbf95e5f4e3b-kube-api-access-mqh8r\") pod \"b811225b-766e-41ce-a810-bbf95e5f4e3b\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.608946 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4b01d155-d948-46cb-b5b7-e510da29c9a0-httpd-run\") pod \"4b01d155-d948-46cb-b5b7-e510da29c9a0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.608977 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b01d155-d948-46cb-b5b7-e510da29c9a0-logs\") pod \"4b01d155-d948-46cb-b5b7-e510da29c9a0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609002 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8t2d\" (UniqueName: \"kubernetes.io/projected/8514d901-6322-4fb5-ad48-5c925db3b7de-kube-api-access-x8t2d\") pod \"8514d901-6322-4fb5-ad48-5c925db3b7de\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609021 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-combined-ca-bundle\") pod \"4b01d155-d948-46cb-b5b7-e510da29c9a0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609042 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-combined-ca-bundle\") pod \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\" (UID: \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609057 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-combined-ca-bundle\") pod \"b811225b-766e-41ce-a810-bbf95e5f4e3b\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609085 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-internal-tls-certs\") pod \"4b01d155-d948-46cb-b5b7-e510da29c9a0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609114 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"4b01d155-d948-46cb-b5b7-e510da29c9a0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609134 4814 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8514d901-6322-4fb5-ad48-5c925db3b7de-logs\") pod \"8514d901-6322-4fb5-ad48-5c925db3b7de\" (UID: \"8514d901-6322-4fb5-ad48-5c925db3b7de\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609158 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-scripts\") pod \"4b01d155-d948-46cb-b5b7-e510da29c9a0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609177 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wkqg\" (UniqueName: \"kubernetes.io/projected/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-kube-api-access-6wkqg\") pod \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\" (UID: \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609230 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b811225b-766e-41ce-a810-bbf95e5f4e3b-logs\") pod \"b811225b-766e-41ce-a810-bbf95e5f4e3b\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609255 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mmxj\" (UniqueName: \"kubernetes.io/projected/4b01d155-d948-46cb-b5b7-e510da29c9a0-kube-api-access-6mmxj\") pod \"4b01d155-d948-46cb-b5b7-e510da29c9a0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609272 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-config\") pod \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\" (UID: \"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609289 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-config-data\") pod \"b811225b-766e-41ce-a810-bbf95e5f4e3b\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609322 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-config-data\") pod \"4b01d155-d948-46cb-b5b7-e510da29c9a0\" (UID: \"4b01d155-d948-46cb-b5b7-e510da29c9a0\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609340 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-public-tls-certs\") pod \"b811225b-766e-41ce-a810-bbf95e5f4e3b\" (UID: \"b811225b-766e-41ce-a810-bbf95e5f4e3b\") " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.609444 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b811225b-766e-41ce-a810-bbf95e5f4e3b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b811225b-766e-41ce-a810-bbf95e5f4e3b" (UID: "b811225b-766e-41ce-a810-bbf95e5f4e3b"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610008 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b01d155-d948-46cb-b5b7-e510da29c9a0-logs" (OuterVolumeSpecName: "logs") pod "4b01d155-d948-46cb-b5b7-e510da29c9a0" (UID: "4b01d155-d948-46cb-b5b7-e510da29c9a0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610424 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b811225b-766e-41ce-a810-bbf95e5f4e3b-logs" (OuterVolumeSpecName: "logs") pod "b811225b-766e-41ce-a810-bbf95e5f4e3b" (UID: "b811225b-766e-41ce-a810-bbf95e5f4e3b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610478 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610497 4814 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610507 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d30d0a83-bcbe-4915-ba04-dbbeee62022a-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610517 4814 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b811225b-766e-41ce-a810-bbf95e5f4e3b-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610525 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610499 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8514d901-6322-4fb5-ad48-5c925db3b7de-config-data" (OuterVolumeSpecName: "config-data") pod "8514d901-6322-4fb5-ad48-5c925db3b7de" (UID: "8514d901-6322-4fb5-ad48-5c925db3b7de"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610545 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8514d901-6322-4fb5-ad48-5c925db3b7de-scripts" (OuterVolumeSpecName: "scripts") pod "8514d901-6322-4fb5-ad48-5c925db3b7de" (UID: "8514d901-6322-4fb5-ad48-5c925db3b7de"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610535 4814 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d30d0a83-bcbe-4915-ba04-dbbeee62022a-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610601 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610633 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d30d0a83-bcbe-4915-ba04-dbbeee62022a-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610662 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b01d155-d948-46cb-b5b7-e510da29c9a0-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610672 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02fc56a5-86a0-4983-8219-0c0f4f220b7b-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610680 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d30d0a83-bcbe-4915-ba04-dbbeee62022a-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610692 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phn6r\" (UniqueName: \"kubernetes.io/projected/d30d0a83-bcbe-4915-ba04-dbbeee62022a-kube-api-access-phn6r\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.610703 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tp72b\" (UniqueName: \"kubernetes.io/projected/02fc56a5-86a0-4983-8219-0c0f4f220b7b-kube-api-access-tp72b\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.612415 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b01d155-d948-46cb-b5b7-e510da29c9a0-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4b01d155-d948-46cb-b5b7-e510da29c9a0" (UID: "4b01d155-d948-46cb-b5b7-e510da29c9a0"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.612864 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "b811225b-766e-41ce-a810-bbf95e5f4e3b" (UID: "b811225b-766e-41ce-a810-bbf95e5f4e3b"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.613712 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8514d901-6322-4fb5-ad48-5c925db3b7de-logs" (OuterVolumeSpecName: "logs") pod "8514d901-6322-4fb5-ad48-5c925db3b7de" (UID: "8514d901-6322-4fb5-ad48-5c925db3b7de"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.617728 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-kube-api-access-6wkqg" (OuterVolumeSpecName: "kube-api-access-6wkqg") pod "6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca" (UID: "6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca"). InnerVolumeSpecName "kube-api-access-6wkqg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.617811 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b811225b-766e-41ce-a810-bbf95e5f4e3b-kube-api-access-mqh8r" (OuterVolumeSpecName: "kube-api-access-mqh8r") pod "b811225b-766e-41ce-a810-bbf95e5f4e3b" (UID: "b811225b-766e-41ce-a810-bbf95e5f4e3b"). InnerVolumeSpecName "kube-api-access-mqh8r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.618067 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8514d901-6322-4fb5-ad48-5c925db3b7de-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "8514d901-6322-4fb5-ad48-5c925db3b7de" (UID: "8514d901-6322-4fb5-ad48-5c925db3b7de"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.618322 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8514d901-6322-4fb5-ad48-5c925db3b7de-kube-api-access-x8t2d" (OuterVolumeSpecName: "kube-api-access-x8t2d") pod "8514d901-6322-4fb5-ad48-5c925db3b7de" (UID: "8514d901-6322-4fb5-ad48-5c925db3b7de"). InnerVolumeSpecName "kube-api-access-x8t2d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.619844 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-scripts" (OuterVolumeSpecName: "scripts") pod "b811225b-766e-41ce-a810-bbf95e5f4e3b" (UID: "b811225b-766e-41ce-a810-bbf95e5f4e3b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.619929 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-scripts" (OuterVolumeSpecName: "scripts") pod "4b01d155-d948-46cb-b5b7-e510da29c9a0" (UID: "4b01d155-d948-46cb-b5b7-e510da29c9a0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.620471 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b01d155-d948-46cb-b5b7-e510da29c9a0-kube-api-access-6mmxj" (OuterVolumeSpecName: "kube-api-access-6mmxj") pod "4b01d155-d948-46cb-b5b7-e510da29c9a0" (UID: "4b01d155-d948-46cb-b5b7-e510da29c9a0"). InnerVolumeSpecName "kube-api-access-6mmxj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.623257 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "4b01d155-d948-46cb-b5b7-e510da29c9a0" (UID: "4b01d155-d948-46cb-b5b7-e510da29c9a0"). InnerVolumeSpecName "local-storage06-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.640042 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b811225b-766e-41ce-a810-bbf95e5f4e3b" (UID: "b811225b-766e-41ce-a810-bbf95e5f4e3b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.642169 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca" (UID: "6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.644312 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b01d155-d948-46cb-b5b7-e510da29c9a0" (UID: "4b01d155-d948-46cb-b5b7-e510da29c9a0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.657107 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-config" (OuterVolumeSpecName: "config") pod "6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca" (UID: "6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.659040 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b811225b-766e-41ce-a810-bbf95e5f4e3b" (UID: "b811225b-766e-41ce-a810-bbf95e5f4e3b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.681470 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-config-data" (OuterVolumeSpecName: "config-data") pod "4b01d155-d948-46cb-b5b7-e510da29c9a0" (UID: "4b01d155-d948-46cb-b5b7-e510da29c9a0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.685663 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "4b01d155-d948-46cb-b5b7-e510da29c9a0" (UID: "4b01d155-d948-46cb-b5b7-e510da29c9a0"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.691362 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-config-data" (OuterVolumeSpecName: "config-data") pod "b811225b-766e-41ce-a810-bbf95e5f4e3b" (UID: "b811225b-766e-41ce-a810-bbf95e5f4e3b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712094 4814 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712134 4814 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8514d901-6322-4fb5-ad48-5c925db3b7de-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712203 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8514d901-6322-4fb5-ad48-5c925db3b7de-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712271 4814 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712285 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8514d901-6322-4fb5-ad48-5c925db3b7de-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712295 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712303 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqh8r\" (UniqueName: \"kubernetes.io/projected/b811225b-766e-41ce-a810-bbf95e5f4e3b-kube-api-access-mqh8r\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712312 4814 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4b01d155-d948-46cb-b5b7-e510da29c9a0-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712356 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8t2d\" (UniqueName: \"kubernetes.io/projected/8514d901-6322-4fb5-ad48-5c925db3b7de-kube-api-access-x8t2d\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712366 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712374 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712382 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712431 4814 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc 
kubenswrapper[4814]: I0122 05:36:35.712449 4814 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712459 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8514d901-6322-4fb5-ad48-5c925db3b7de-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712468 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712476 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wkqg\" (UniqueName: \"kubernetes.io/projected/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-kube-api-access-6wkqg\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712484 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b811225b-766e-41ce-a810-bbf95e5f4e3b-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712491 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mmxj\" (UniqueName: \"kubernetes.io/projected/4b01d155-d948-46cb-b5b7-e510da29c9a0-kube-api-access-6mmxj\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712518 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712526 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b811225b-766e-41ce-a810-bbf95e5f4e3b-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.712534 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b01d155-d948-46cb-b5b7-e510da29c9a0-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.729259 4814 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.729871 4814 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.813511 4814 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.813543 4814 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.853176 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fd796464f-s7864" 
event={"ID":"8514d901-6322-4fb5-ad48-5c925db3b7de","Type":"ContainerDied","Data":"0fef9e0b3bc10692bcabfc69d05bc39f4f5b4952b27b170257a6395d94250646"} Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.854677 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-fd796464f-s7864" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.865769 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" event={"ID":"02fc56a5-86a0-4983-8219-0c0f4f220b7b","Type":"ContainerDied","Data":"258eeaeb2d8f75334af3fbb29034005202978ef07944789823b7f9c3913521e8"} Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.865824 4814 scope.go:117] "RemoveContainer" containerID="888e29db900467c06d89a2ca16d7477bfcd2f0e54110ca90eae2b05a4f535deb" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.865998 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.878942 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-768fd6449-lhdq7" event={"ID":"d30d0a83-bcbe-4915-ba04-dbbeee62022a","Type":"ContainerDied","Data":"997a5d64ab12ad468d1d1e73e9fe2a63d46d38592fc69cfe28bbfd2ba41556a3"} Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.879048 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-768fd6449-lhdq7" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.880929 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.881209 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b811225b-766e-41ce-a810-bbf95e5f4e3b","Type":"ContainerDied","Data":"f8c98d3c6f12be47aec73ef8ebd6c66f88669df2534ff922aa3d1aab8c137c34"} Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.885102 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rpl5x" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.885170 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rpl5x" event={"ID":"6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca","Type":"ContainerDied","Data":"db18d922924f435348b22eb70d8dd6e11f79fa1a70028083cb6af2410f62b225"} Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.885194 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db18d922924f435348b22eb70d8dd6e11f79fa1a70028083cb6af2410f62b225" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.892724 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4b01d155-d948-46cb-b5b7-e510da29c9a0","Type":"ContainerDied","Data":"c3820d3b6495d87959334e35014b481897dbc05dbaebe632ad63c7d5a5b98ac4"} Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.892848 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:35 crc kubenswrapper[4814]: E0122 05:36:35.893602 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-rfq2m" podUID="66536b14-9f47-4fe1-bc77-583a4ffff700" Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.961955 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-fd796464f-s7864"] Jan 22 05:36:35 crc kubenswrapper[4814]: I0122 05:36:35.983014 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-fd796464f-s7864"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.022255 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-6rwmc"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.035555 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-6rwmc"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.062844 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-768fd6449-lhdq7"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.069902 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-768fd6449-lhdq7"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.076681 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.082208 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.087849 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.093675 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.099598 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 05:36:36 crc kubenswrapper[4814]: E0122 05:36:36.100912 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b01d155-d948-46cb-b5b7-e510da29c9a0" containerName="glance-httpd" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.100934 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b01d155-d948-46cb-b5b7-e510da29c9a0" containerName="glance-httpd" Jan 22 05:36:36 crc kubenswrapper[4814]: E0122 05:36:36.100946 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerName="dnsmasq-dns" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.100953 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerName="dnsmasq-dns" Jan 22 05:36:36 crc kubenswrapper[4814]: E0122 05:36:36.100964 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca" containerName="neutron-db-sync" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.100970 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca" containerName="neutron-db-sync" Jan 22 05:36:36 crc kubenswrapper[4814]: E0122 05:36:36.100979 4814 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="b811225b-766e-41ce-a810-bbf95e5f4e3b" containerName="glance-log" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.100986 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="b811225b-766e-41ce-a810-bbf95e5f4e3b" containerName="glance-log" Jan 22 05:36:36 crc kubenswrapper[4814]: E0122 05:36:36.100994 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerName="init" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.101000 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerName="init" Jan 22 05:36:36 crc kubenswrapper[4814]: E0122 05:36:36.101013 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b811225b-766e-41ce-a810-bbf95e5f4e3b" containerName="glance-httpd" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.101019 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="b811225b-766e-41ce-a810-bbf95e5f4e3b" containerName="glance-httpd" Jan 22 05:36:36 crc kubenswrapper[4814]: E0122 05:36:36.101032 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b01d155-d948-46cb-b5b7-e510da29c9a0" containerName="glance-log" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.101039 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b01d155-d948-46cb-b5b7-e510da29c9a0" containerName="glance-log" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.101192 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="b811225b-766e-41ce-a810-bbf95e5f4e3b" containerName="glance-httpd" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.101205 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="b811225b-766e-41ce-a810-bbf95e5f4e3b" containerName="glance-log" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.101214 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerName="dnsmasq-dns" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.101223 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b01d155-d948-46cb-b5b7-e510da29c9a0" containerName="glance-httpd" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.101230 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca" containerName="neutron-db-sync" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.101244 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b01d155-d948-46cb-b5b7-e510da29c9a0" containerName="glance-log" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.102132 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.106959 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.107132 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.107238 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.107354 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-jnz8l" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.107440 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.121849 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.123692 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.126856 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.126985 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.131733 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.219231 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e974d1b-3f13-47ce-b454-8388999303ae-logs\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.219304 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.219326 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.219455 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.219555 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.219584 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e974d1b-3f13-47ce-b454-8388999303ae-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.219614 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.219763 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jc9v\" (UniqueName: \"kubernetes.io/projected/5e974d1b-3f13-47ce-b454-8388999303ae-kube-api-access-8jc9v\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.321813 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e974d1b-3f13-47ce-b454-8388999303ae-logs\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.321876 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3249a9bd-6017-4d3a-80df-4b34a69af9e9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.321902 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc6dd\" (UniqueName: \"kubernetes.io/projected/3249a9bd-6017-4d3a-80df-4b34a69af9e9-kube-api-access-gc6dd\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.321920 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.321935 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.321967 4814 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.321996 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.322012 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.322026 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.322043 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e974d1b-3f13-47ce-b454-8388999303ae-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.322062 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.322077 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.322109 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-config-data\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.322129 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-scripts\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.322150 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3249a9bd-6017-4d3a-80df-4b34a69af9e9-logs\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.322176 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jc9v\" (UniqueName: \"kubernetes.io/projected/5e974d1b-3f13-47ce-b454-8388999303ae-kube-api-access-8jc9v\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.322882 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e974d1b-3f13-47ce-b454-8388999303ae-logs\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.323666 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.323688 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e974d1b-3f13-47ce-b454-8388999303ae-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.327019 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.331865 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.347306 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.348196 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.355994 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.356121 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jc9v\" (UniqueName: \"kubernetes.io/projected/5e974d1b-3f13-47ce-b454-8388999303ae-kube-api-access-8jc9v\") pod \"glance-default-internal-api-0\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.370793 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" path="/var/lib/kubelet/pods/02fc56a5-86a0-4983-8219-0c0f4f220b7b/volumes" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.371556 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b01d155-d948-46cb-b5b7-e510da29c9a0" path="/var/lib/kubelet/pods/4b01d155-d948-46cb-b5b7-e510da29c9a0/volumes" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.372421 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8514d901-6322-4fb5-ad48-5c925db3b7de" path="/var/lib/kubelet/pods/8514d901-6322-4fb5-ad48-5c925db3b7de/volumes" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.373349 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b811225b-766e-41ce-a810-bbf95e5f4e3b" path="/var/lib/kubelet/pods/b811225b-766e-41ce-a810-bbf95e5f4e3b/volumes" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.374439 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d30d0a83-bcbe-4915-ba04-dbbeee62022a" path="/var/lib/kubelet/pods/d30d0a83-bcbe-4915-ba04-dbbeee62022a/volumes" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.375014 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eccfbe4e-3954-4ff0-964e-4dfbc753fdb1" path="/var/lib/kubelet/pods/eccfbe4e-3954-4ff0-964e-4dfbc753fdb1/volumes" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.417723 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.424034 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.424077 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.424102 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.424148 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-config-data\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.424168 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-scripts\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.424188 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3249a9bd-6017-4d3a-80df-4b34a69af9e9-logs\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.424249 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3249a9bd-6017-4d3a-80df-4b34a69af9e9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.424275 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gc6dd\" (UniqueName: \"kubernetes.io/projected/3249a9bd-6017-4d3a-80df-4b34a69af9e9-kube-api-access-gc6dd\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.424847 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3249a9bd-6017-4d3a-80df-4b34a69af9e9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: 
I0122 05:36:36.424959 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.424950 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3249a9bd-6017-4d3a-80df-4b34a69af9e9-logs\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.429164 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.429164 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.429302 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-scripts\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.432112 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-config-data\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.456061 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.457277 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc6dd\" (UniqueName: \"kubernetes.io/projected/3249a9bd-6017-4d3a-80df-4b34a69af9e9-kube-api-access-gc6dd\") pod \"glance-default-external-api-0\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") " pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.730878 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-7tgnz"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.732343 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.738705 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.759027 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.759089 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.759126 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.759143 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.759169 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-config\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.759200 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpq7j\" (UniqueName: \"kubernetes.io/projected/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-kube-api-access-fpq7j\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.765986 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-7tgnz"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.822991 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-787f7bb69b-kz5kq"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.824412 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.829021 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.829087 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.829113 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-n6pdm" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.836093 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.851140 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-787f7bb69b-kz5kq"] Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.865412 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpq7j\" (UniqueName: \"kubernetes.io/projected/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-kube-api-access-fpq7j\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.865508 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.865554 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.865597 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.865618 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.865663 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-config\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.866558 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-config\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " 
pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.866749 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.871318 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.885534 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.888392 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.909240 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpq7j\" (UniqueName: \"kubernetes.io/projected/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-kube-api-access-fpq7j\") pod \"dnsmasq-dns-84b966f6c9-7tgnz\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.951017 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-6rwmc" podUID="02fc56a5-86a0-4983-8219-0c0f4f220b7b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: i/o timeout" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.967613 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-httpd-config\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.967681 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-config\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.967746 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47gql\" (UniqueName: \"kubernetes.io/projected/a693759e-220d-4f38-ab6d-e4e21b91fefa-kube-api-access-47gql\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.967775 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-combined-ca-bundle\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:36 crc kubenswrapper[4814]: I0122 05:36:36.967809 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-ovndb-tls-certs\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:37 crc kubenswrapper[4814]: I0122 05:36:37.050114 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:37 crc kubenswrapper[4814]: I0122 05:36:37.070532 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-httpd-config\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:37 crc kubenswrapper[4814]: I0122 05:36:37.070576 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-config\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:37 crc kubenswrapper[4814]: I0122 05:36:37.070659 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47gql\" (UniqueName: \"kubernetes.io/projected/a693759e-220d-4f38-ab6d-e4e21b91fefa-kube-api-access-47gql\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:37 crc kubenswrapper[4814]: I0122 05:36:37.070686 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-combined-ca-bundle\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:37 crc kubenswrapper[4814]: I0122 05:36:37.070715 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-ovndb-tls-certs\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:37 crc kubenswrapper[4814]: I0122 05:36:37.074173 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-ovndb-tls-certs\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:37 crc kubenswrapper[4814]: I0122 05:36:37.075428 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-combined-ca-bundle\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 
05:36:37 crc kubenswrapper[4814]: I0122 05:36:37.078433 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-httpd-config\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:37 crc kubenswrapper[4814]: I0122 05:36:37.079365 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-config\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:37 crc kubenswrapper[4814]: I0122 05:36:37.090952 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47gql\" (UniqueName: \"kubernetes.io/projected/a693759e-220d-4f38-ab6d-e4e21b91fefa-kube-api-access-47gql\") pod \"neutron-787f7bb69b-kz5kq\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:37 crc kubenswrapper[4814]: I0122 05:36:37.157389 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:37 crc kubenswrapper[4814]: I0122 05:36:37.787877 4814 scope.go:117] "RemoveContainer" containerID="62d72c115f899c875878b510db9102c030d9927de129565d7be5798b3d9f2d6e" Jan 22 05:36:37 crc kubenswrapper[4814]: E0122 05:36:37.826698 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Jan 22 05:36:37 crc kubenswrapper[4814]: E0122 05:36:37.826838 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vcwpn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-mk8qf_openstack(6af8690c-751e-4196-b6f4-db21950c5ec7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:36:37 crc kubenswrapper[4814]: E0122 05:36:37.828215 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-mk8qf" podUID="6af8690c-751e-4196-b6f4-db21950c5ec7" Jan 22 05:36:38 crc kubenswrapper[4814]: E0122 05:36:38.022672 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-mk8qf" podUID="6af8690c-751e-4196-b6f4-db21950c5ec7" Jan 22 05:36:38 crc kubenswrapper[4814]: I0122 05:36:38.140812 4814 scope.go:117] "RemoveContainer" containerID="861f79f543bab6d5fb64e96a82b7cb455ff121ba1b2400bc6046cbcc0f7a897e" Jan 22 05:36:38 crc kubenswrapper[4814]: I0122 05:36:38.246427 4814 scope.go:117] "RemoveContainer" containerID="d0463c0fc7cf743707a16a34781b69dc8fc0bd7c04e8c7e27040f198dcabd2c2" Jan 22 05:36:38 crc kubenswrapper[4814]: I0122 05:36:38.298581 
4814 scope.go:117] "RemoveContainer" containerID="6b5399e2a4309766348db84dc35daf025b73a22c06c8d2bc3ccd95fc1cdd4241" Jan 22 05:36:38 crc kubenswrapper[4814]: I0122 05:36:38.369409 4814 scope.go:117] "RemoveContainer" containerID="006cb23e2f9f73a1813492c70c246ea2f1fd0d26f2646106455aec21ff544b13" Jan 22 05:36:38 crc kubenswrapper[4814]: I0122 05:36:38.384376 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fc797bd5d-f6wlm"] Jan 22 05:36:38 crc kubenswrapper[4814]: I0122 05:36:38.389396 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-fvkql"] Jan 22 05:36:38 crc kubenswrapper[4814]: I0122 05:36:38.417477 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 22 05:36:38 crc kubenswrapper[4814]: I0122 05:36:38.545946 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-75cf549f68-bs2gm"] Jan 22 05:36:38 crc kubenswrapper[4814]: I0122 05:36:38.672215 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-7tgnz"] Jan 22 05:36:38 crc kubenswrapper[4814]: W0122 05:36:38.676795 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7f8c8fc7_eaa8_4231_99b2_26c1fa17c795.slice/crio-1fd04e98fcd47bd0ef2e1d7ad295d07682bec4608630a91c390e0cb7d0a4e1dd WatchSource:0}: Error finding container 1fd04e98fcd47bd0ef2e1d7ad295d07682bec4608630a91c390e0cb7d0a4e1dd: Status 404 returned error can't find the container with id 1fd04e98fcd47bd0ef2e1d7ad295d07682bec4608630a91c390e0cb7d0a4e1dd Jan 22 05:36:38 crc kubenswrapper[4814]: I0122 05:36:38.725175 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 05:36:38 crc kubenswrapper[4814]: I0122 05:36:38.907113 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 05:36:38 crc kubenswrapper[4814]: I0122 05:36:38.997057 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd231434-0d02-4b13-9a72-c31277deeacf","Type":"ContainerStarted","Data":"daa591d018eaab0582fdc764b67e1553c318f27f3d77a1cfa5bb0e21425c3007"} Jan 22 05:36:38 crc kubenswrapper[4814]: I0122 05:36:38.998357 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5e974d1b-3f13-47ce-b454-8388999303ae","Type":"ContainerStarted","Data":"dbdfa45b43487514450f7cc8bca52e0072a4f059e07f9b5e96883f0fde240d5b"} Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.000891 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3249a9bd-6017-4d3a-80df-4b34a69af9e9","Type":"ContainerStarted","Data":"cfa0e979f6bc95f4765bc0318ca34dc4d2c157d8d7ccf59bd40ef8ca52271fc6"} Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.002322 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fc797bd5d-f6wlm" event={"ID":"50923695-9bcc-49c5-844f-6275c99729e2","Type":"ContainerStarted","Data":"e0b158fa80cdab1ce98078e95ef9d217a7d096649738a7efb444d15b3c9a5bb3"} Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.004975 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" event={"ID":"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795","Type":"ContainerStarted","Data":"1fd04e98fcd47bd0ef2e1d7ad295d07682bec4608630a91c390e0cb7d0a4e1dd"} Jan 22 05:36:39 crc 
kubenswrapper[4814]: I0122 05:36:39.006306 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-75cf549f68-bs2gm" event={"ID":"d7073bb4-1466-4fe6-bb49-f91bbee77dbd","Type":"ContainerStarted","Data":"1d68b28394e699b16dc46eccba7ec4b138e152bf5c1d81464dc0875fc858ae7c"} Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.009172 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vd5qz" event={"ID":"458612b9-c230-4db2-82d3-0a1b8fbe81f1","Type":"ContainerStarted","Data":"f8da10c693fdef6d2e86919c1d80ab87c23d84c6c5362d0c4933a612f3d9545b"} Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.023191 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-n8qnk" event={"ID":"81f9b3f5-db07-49ef-933f-ef90f1c017f6","Type":"ContainerStarted","Data":"d7f6bb631b94bc34035a198577d446febd1954744ddfb1fb353107eb9b654ba5"} Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.029261 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fvkql" event={"ID":"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3","Type":"ContainerStarted","Data":"4452e3c773b579c12751fa7425bc88f50efdbba9a5b14cee90cbcf566531a877"} Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.029339 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fvkql" event={"ID":"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3","Type":"ContainerStarted","Data":"2a65d47642a96e9a7038b5cb62fbea2ae979d958ab1b32882439cf907746be0c"} Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.041824 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-vd5qz" podStartSLOduration=6.326368443 podStartE2EDuration="46.041796208s" podCreationTimestamp="2026-01-22 05:35:53 +0000 UTC" firstStartedPulling="2026-01-22 05:35:55.542388715 +0000 UTC m=+1041.625876930" lastFinishedPulling="2026-01-22 05:36:35.25781648 +0000 UTC m=+1081.341304695" observedRunningTime="2026-01-22 05:36:39.024208854 +0000 UTC m=+1085.107697069" watchObservedRunningTime="2026-01-22 05:36:39.041796208 +0000 UTC m=+1085.125284423" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.071155 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-n8qnk" podStartSLOduration=3.980036299 podStartE2EDuration="47.071139148s" podCreationTimestamp="2026-01-22 05:35:52 +0000 UTC" firstStartedPulling="2026-01-22 05:35:55.108751277 +0000 UTC m=+1041.192239492" lastFinishedPulling="2026-01-22 05:36:38.199854126 +0000 UTC m=+1084.283342341" observedRunningTime="2026-01-22 05:36:39.04893264 +0000 UTC m=+1085.132420855" watchObservedRunningTime="2026-01-22 05:36:39.071139148 +0000 UTC m=+1085.154627363" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.084673 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-fvkql" podStartSLOduration=16.084657897 podStartE2EDuration="16.084657897s" podCreationTimestamp="2026-01-22 05:36:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:36:39.066924067 +0000 UTC m=+1085.150412282" watchObservedRunningTime="2026-01-22 05:36:39.084657897 +0000 UTC m=+1085.168146112" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.296247 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6cc4dbcdb9-79wm4"] Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.300612 
4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.303706 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.303992 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.307010 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6cc4dbcdb9-79wm4"] Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.440525 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-internal-tls-certs\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.440577 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-httpd-config\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.441522 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-config\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.441577 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-combined-ca-bundle\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.441607 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-ovndb-tls-certs\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.441669 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-public-tls-certs\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.441695 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbw96\" (UniqueName: \"kubernetes.io/projected/c4bdc5ec-1c01-4278-b941-ec748d494a8c-kube-api-access-fbw96\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.547471 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-internal-tls-certs\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.547524 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-httpd-config\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.547595 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-config\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.547649 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-combined-ca-bundle\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.547683 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-ovndb-tls-certs\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.547727 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-public-tls-certs\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.547752 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbw96\" (UniqueName: \"kubernetes.io/projected/c4bdc5ec-1c01-4278-b941-ec748d494a8c-kube-api-access-fbw96\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.574843 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-combined-ca-bundle\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.574934 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-public-tls-certs\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.575466 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-config\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: 
\"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.575507 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-internal-tls-certs\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.576174 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-httpd-config\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.577988 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-ovndb-tls-certs\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.617043 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-787f7bb69b-kz5kq"] Jan 22 05:36:39 crc kubenswrapper[4814]: W0122 05:36:39.645821 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda693759e_220d_4f38_ab6d_e4e21b91fefa.slice/crio-6c619aca2255e0d51789df55bb94de81848a16ecf122ba79589d5de73f2bf624 WatchSource:0}: Error finding container 6c619aca2255e0d51789df55bb94de81848a16ecf122ba79589d5de73f2bf624: Status 404 returned error can't find the container with id 6c619aca2255e0d51789df55bb94de81848a16ecf122ba79589d5de73f2bf624 Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.683808 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbw96\" (UniqueName: \"kubernetes.io/projected/c4bdc5ec-1c01-4278-b941-ec748d494a8c-kube-api-access-fbw96\") pod \"neutron-6cc4dbcdb9-79wm4\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:39 crc kubenswrapper[4814]: I0122 05:36:39.931939 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:40 crc kubenswrapper[4814]: I0122 05:36:40.077268 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-787f7bb69b-kz5kq" event={"ID":"a693759e-220d-4f38-ab6d-e4e21b91fefa","Type":"ContainerStarted","Data":"6c619aca2255e0d51789df55bb94de81848a16ecf122ba79589d5de73f2bf624"} Jan 22 05:36:40 crc kubenswrapper[4814]: I0122 05:36:40.106307 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5e974d1b-3f13-47ce-b454-8388999303ae","Type":"ContainerStarted","Data":"ecaac6e89002ab3731cd78f85a47bcdd5524ec224312d3a2c40b451e602f04a6"} Jan 22 05:36:40 crc kubenswrapper[4814]: I0122 05:36:40.129164 4814 generic.go:334] "Generic (PLEG): container finished" podID="7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" containerID="5007a78765effcbe8ead2fc11c91c0dc0cc6788cc657ee1a710ceac01b23050c" exitCode=0 Jan 22 05:36:40 crc kubenswrapper[4814]: I0122 05:36:40.129390 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" event={"ID":"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795","Type":"ContainerDied","Data":"5007a78765effcbe8ead2fc11c91c0dc0cc6788cc657ee1a710ceac01b23050c"} Jan 22 05:36:40 crc kubenswrapper[4814]: I0122 05:36:40.134901 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-75cf549f68-bs2gm" event={"ID":"d7073bb4-1466-4fe6-bb49-f91bbee77dbd","Type":"ContainerStarted","Data":"53173944606f62db6de9e34d1cf4afb1a1879ba70678f3525b5142df93c6585c"} Jan 22 05:36:40 crc kubenswrapper[4814]: I0122 05:36:40.138057 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fc797bd5d-f6wlm" event={"ID":"50923695-9bcc-49c5-844f-6275c99729e2","Type":"ContainerStarted","Data":"9b9f397b2ef25ac63c69976a1ea84fba0814b89575854dd1327004c4934916e1"} Jan 22 05:36:40 crc kubenswrapper[4814]: I0122 05:36:40.138081 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fc797bd5d-f6wlm" event={"ID":"50923695-9bcc-49c5-844f-6275c99729e2","Type":"ContainerStarted","Data":"7f23a639b1ada62582270bf6799256509c66f1bf11e5ba263f91e0db7ea739ff"} Jan 22 05:36:40 crc kubenswrapper[4814]: I0122 05:36:40.203993 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5fc797bd5d-f6wlm" podStartSLOduration=37.577944604 podStartE2EDuration="38.203978325s" podCreationTimestamp="2026-01-22 05:36:02 +0000 UTC" firstStartedPulling="2026-01-22 05:36:38.415991274 +0000 UTC m=+1084.499479489" lastFinishedPulling="2026-01-22 05:36:39.042024995 +0000 UTC m=+1085.125513210" observedRunningTime="2026-01-22 05:36:40.197954378 +0000 UTC m=+1086.281442593" watchObservedRunningTime="2026-01-22 05:36:40.203978325 +0000 UTC m=+1086.287466540" Jan 22 05:36:40 crc kubenswrapper[4814]: I0122 05:36:40.907439 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6cc4dbcdb9-79wm4"] Jan 22 05:36:41 crc kubenswrapper[4814]: I0122 05:36:41.146693 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5e974d1b-3f13-47ce-b454-8388999303ae","Type":"ContainerStarted","Data":"efeb832c17205f8476fcd09ceb3f9b6ac19a971f8c1491912d2824b1ebf75113"} Jan 22 05:36:41 crc kubenswrapper[4814]: I0122 05:36:41.148765 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-75cf549f68-bs2gm" 
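
The PLEG lines interleaved through this window come from periodic relisting: the generic PLEG compares successive runtime snapshots and synthesizes ContainerStarted/ContainerDied lifecycle events, which "SyncLoop (PLEG): event for pod" then reports — the "Generic (PLEG): container finished" line additionally records the exit code (0 for the dnsmasq-dns-84b966f6c9-7tgnz container that exited above). A toy relist diff with invented types; real PLEG also tracks sandboxes and containers that vanish entirely between relists, which this sketch ignores:

package main

import "fmt"

type event struct{ containerID, kind string }

// diff compares two relist snapshots (container ID -> running?) and
// synthesizes lifecycle events, roughly what the generic PLEG does.
func diff(old, cur map[string]bool) []event {
	var evs []event
	for id, running := range cur {
		was, seen := old[id]
		switch {
		case running && (!seen || !was):
			evs = append(evs, event{id, "ContainerStarted"})
		case !running && seen && was:
			evs = append(evs, event{id, "ContainerDied"})
		}
	}
	return evs
}

func main() {
	old := map[string]bool{"5007a787": true}                    // init container running
	cur := map[string]bool{"5007a787": false, "e752660e": true} // it exited; dns container started
	for _, e := range diff(old, cur) {
		fmt.Printf("SyncLoop (PLEG): %s %s\n", e.kind, e.containerID) // order varies (map iteration)
	}
}
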
event={"ID":"d7073bb4-1466-4fe6-bb49-f91bbee77dbd","Type":"ContainerStarted","Data":"88933935976f34a7dda8c3866d1566765e46af852796602afd0a0f4e311e2166"} Jan 22 05:36:41 crc kubenswrapper[4814]: I0122 05:36:41.149975 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3249a9bd-6017-4d3a-80df-4b34a69af9e9","Type":"ContainerStarted","Data":"c94f497335b11dd5bab0f1cf53950ffbd676240caabc9e2e4ed2d7910569f89a"} Jan 22 05:36:41 crc kubenswrapper[4814]: I0122 05:36:41.151141 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-787f7bb69b-kz5kq" event={"ID":"a693759e-220d-4f38-ab6d-e4e21b91fefa","Type":"ContainerStarted","Data":"5c62d3c7a1029bbde8e91169e8849a6b7b7312fc0e65d174a1704ba0946011d9"} Jan 22 05:36:41 crc kubenswrapper[4814]: I0122 05:36:41.175514 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.175497773 podStartE2EDuration="6.175497773s" podCreationTimestamp="2026-01-22 05:36:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:36:41.170791478 +0000 UTC m=+1087.254279723" watchObservedRunningTime="2026-01-22 05:36:41.175497773 +0000 UTC m=+1087.258985988" Jan 22 05:36:41 crc kubenswrapper[4814]: I0122 05:36:41.196357 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-75cf549f68-bs2gm" podStartSLOduration=38.593070764 podStartE2EDuration="39.1963399s" podCreationTimestamp="2026-01-22 05:36:02 +0000 UTC" firstStartedPulling="2026-01-22 05:36:38.568346816 +0000 UTC m=+1084.651835031" lastFinishedPulling="2026-01-22 05:36:39.171615962 +0000 UTC m=+1085.255104167" observedRunningTime="2026-01-22 05:36:41.19312781 +0000 UTC m=+1087.276616025" watchObservedRunningTime="2026-01-22 05:36:41.1963399 +0000 UTC m=+1087.279828115" Jan 22 05:36:42 crc kubenswrapper[4814]: I0122 05:36:42.202273 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd231434-0d02-4b13-9a72-c31277deeacf","Type":"ContainerStarted","Data":"9b7f46b5460c1ad190ffa4151b61b12d2b69ae28f3095f159aab9823219ef863"} Jan 22 05:36:42 crc kubenswrapper[4814]: I0122 05:36:42.206281 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6cc4dbcdb9-79wm4" event={"ID":"c4bdc5ec-1c01-4278-b941-ec748d494a8c","Type":"ContainerStarted","Data":"7227166f51a6ea452cf3e6ed6b122ec93a84b70b70b63faf04397ddc39650cad"} Jan 22 05:36:42 crc kubenswrapper[4814]: I0122 05:36:42.206383 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6cc4dbcdb9-79wm4" event={"ID":"c4bdc5ec-1c01-4278-b941-ec748d494a8c","Type":"ContainerStarted","Data":"b8bcc01cfe1c622a59e9044973a57b66974a5ee04cc631bf5ec60dd023dcce96"} Jan 22 05:36:42 crc kubenswrapper[4814]: I0122 05:36:42.206463 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6cc4dbcdb9-79wm4" event={"ID":"c4bdc5ec-1c01-4278-b941-ec748d494a8c","Type":"ContainerStarted","Data":"76c4e1335b5364de31bae35146d1c73e825e0123c9fec6f948d8177dfd2e47fc"} Jan 22 05:36:42 crc kubenswrapper[4814]: I0122 05:36:42.207616 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:36:42 crc kubenswrapper[4814]: I0122 05:36:42.219157 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-787f7bb69b-kz5kq" 
event={"ID":"a693759e-220d-4f38-ab6d-e4e21b91fefa","Type":"ContainerStarted","Data":"cf4ae4a88dfc1c5930bec18ff9596582fff0a4d31959829a71c3e9d08dc3a684"} Jan 22 05:36:42 crc kubenswrapper[4814]: I0122 05:36:42.219665 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:36:42 crc kubenswrapper[4814]: I0122 05:36:42.222415 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" event={"ID":"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795","Type":"ContainerStarted","Data":"e752660ef0289f877b7d6589f5422f4e73f6cfcf662c2d8dc639399613ba2f32"} Jan 22 05:36:42 crc kubenswrapper[4814]: I0122 05:36:42.222441 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:42 crc kubenswrapper[4814]: I0122 05:36:42.229119 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6cc4dbcdb9-79wm4" podStartSLOduration=3.229106686 podStartE2EDuration="3.229106686s" podCreationTimestamp="2026-01-22 05:36:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:36:42.225670799 +0000 UTC m=+1088.309159004" watchObservedRunningTime="2026-01-22 05:36:42.229106686 +0000 UTC m=+1088.312594901" Jan 22 05:36:42 crc kubenswrapper[4814]: I0122 05:36:42.252694 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" podStartSLOduration=6.2526799969999995 podStartE2EDuration="6.252679997s" podCreationTimestamp="2026-01-22 05:36:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:36:42.248022802 +0000 UTC m=+1088.331511017" watchObservedRunningTime="2026-01-22 05:36:42.252679997 +0000 UTC m=+1088.336168212" Jan 22 05:36:42 crc kubenswrapper[4814]: I0122 05:36:42.275368 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-787f7bb69b-kz5kq" podStartSLOduration=6.275346759 podStartE2EDuration="6.275346759s" podCreationTimestamp="2026-01-22 05:36:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:36:42.265201535 +0000 UTC m=+1088.348689750" watchObservedRunningTime="2026-01-22 05:36:42.275346759 +0000 UTC m=+1088.358834974" Jan 22 05:36:43 crc kubenswrapper[4814]: I0122 05:36:43.100301 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:43 crc kubenswrapper[4814]: I0122 05:36:43.101874 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:36:43 crc kubenswrapper[4814]: I0122 05:36:43.230719 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3249a9bd-6017-4d3a-80df-4b34a69af9e9","Type":"ContainerStarted","Data":"4c89dd296fc00dba463e21e245a6e159ba4c521d268c8d11b56517760752df73"} Jan 22 05:36:43 crc kubenswrapper[4814]: I0122 05:36:43.252714 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.252697338 podStartE2EDuration="7.252697338s" podCreationTimestamp="2026-01-22 05:36:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:36:43.248392225 +0000 UTC m=+1089.331880450" watchObservedRunningTime="2026-01-22 05:36:43.252697338 +0000 UTC m=+1089.336185553" Jan 22 05:36:43 crc kubenswrapper[4814]: I0122 05:36:43.285072 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:43 crc kubenswrapper[4814]: I0122 05:36:43.285117 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:36:44 crc kubenswrapper[4814]: I0122 05:36:44.244051 4814 generic.go:334] "Generic (PLEG): container finished" podID="458612b9-c230-4db2-82d3-0a1b8fbe81f1" containerID="f8da10c693fdef6d2e86919c1d80ab87c23d84c6c5362d0c4933a612f3d9545b" exitCode=0 Jan 22 05:36:44 crc kubenswrapper[4814]: I0122 05:36:44.244238 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vd5qz" event={"ID":"458612b9-c230-4db2-82d3-0a1b8fbe81f1","Type":"ContainerDied","Data":"f8da10c693fdef6d2e86919c1d80ab87c23d84c6c5362d0c4933a612f3d9545b"} Jan 22 05:36:45 crc kubenswrapper[4814]: I0122 05:36:45.254160 4814 generic.go:334] "Generic (PLEG): container finished" podID="81f9b3f5-db07-49ef-933f-ef90f1c017f6" containerID="d7f6bb631b94bc34035a198577d446febd1954744ddfb1fb353107eb9b654ba5" exitCode=0 Jan 22 05:36:45 crc kubenswrapper[4814]: I0122 05:36:45.254229 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-n8qnk" event={"ID":"81f9b3f5-db07-49ef-933f-ef90f1c017f6","Type":"ContainerDied","Data":"d7f6bb631b94bc34035a198577d446febd1954744ddfb1fb353107eb9b654ba5"} Jan 22 05:36:45 crc kubenswrapper[4814]: I0122 05:36:45.263022 4814 generic.go:334] "Generic (PLEG): container finished" podID="aad42960-bd7f-4a6a-9d2b-74cf1b7084a3" containerID="4452e3c773b579c12751fa7425bc88f50efdbba9a5b14cee90cbcf566531a877" exitCode=0 Jan 22 05:36:45 crc kubenswrapper[4814]: I0122 05:36:45.263226 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fvkql" event={"ID":"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3","Type":"ContainerDied","Data":"4452e3c773b579c12751fa7425bc88f50efdbba9a5b14cee90cbcf566531a877"} Jan 22 05:36:46 crc kubenswrapper[4814]: I0122 05:36:46.418117 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:46 crc kubenswrapper[4814]: I0122 05:36:46.418159 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:46 crc kubenswrapper[4814]: I0122 05:36:46.470459 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:46 crc kubenswrapper[4814]: I0122 05:36:46.473715 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:46 crc kubenswrapper[4814]: I0122 05:36:46.739448 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 05:36:46 crc kubenswrapper[4814]: I0122 05:36:46.739490 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 05:36:46 crc kubenswrapper[4814]: I0122 05:36:46.777810 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 
05:36:46 crc kubenswrapper[4814]: I0122 05:36:46.791915 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 05:36:47 crc kubenswrapper[4814]: I0122 05:36:47.051770 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:36:47 crc kubenswrapper[4814]: I0122 05:36:47.153132 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-nzwkx"] Jan 22 05:36:47 crc kubenswrapper[4814]: I0122 05:36:47.153385 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" podUID="c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" containerName="dnsmasq-dns" containerID="cri-o://7508e78fc668fe22363adf5d95c259d6251ffb9356ef19b19c86116d46a0a671" gracePeriod=10 Jan 22 05:36:47 crc kubenswrapper[4814]: I0122 05:36:47.301379 4814 generic.go:334] "Generic (PLEG): container finished" podID="c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" containerID="7508e78fc668fe22363adf5d95c259d6251ffb9356ef19b19c86116d46a0a671" exitCode=0 Jan 22 05:36:47 crc kubenswrapper[4814]: I0122 05:36:47.302910 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" event={"ID":"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d","Type":"ContainerDied","Data":"7508e78fc668fe22363adf5d95c259d6251ffb9356ef19b19c86116d46a0a671"} Jan 22 05:36:47 crc kubenswrapper[4814]: I0122 05:36:47.303235 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:47 crc kubenswrapper[4814]: I0122 05:36:47.303256 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:47 crc kubenswrapper[4814]: I0122 05:36:47.303266 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 05:36:47 crc kubenswrapper[4814]: I0122 05:36:47.303276 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.520458 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-n8qnk" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.543386 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.582762 4814 util.go:48] "No ready sandbox for pod can be found. 
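
The "SyncLoop (probe)" entries above trace the startup-probe handshake for the glance pods: results flip from status="unhealthy" to status="started", and only after that do readiness results matter (the status="" readiness entries record a result that is not yet Ready). A toy gate, with invented types, showing how a startup probe suppresses readiness until it succeeds — kubelet's real logic lives in pkg/kubelet/prober:

package main

import "fmt"

// containerGate mimics the ordering visible in the log: readiness
// results are not acted on until the startup probe has succeeded.
type containerGate struct {
	started bool // startup probe has succeeded
	ready   bool
}

func (g *containerGate) onStartupResult(healthy bool) {
	if healthy {
		g.started = true
		fmt.Println(`probe="startup" status="started"`)
	} else {
		fmt.Println(`probe="startup" status="unhealthy"`)
	}
}

func (g *containerGate) onReadinessResult(healthy bool) {
	if !g.started {
		return // readiness is moot until startup succeeds
	}
	g.ready = healthy
	if healthy {
		fmt.Println(`probe="readiness" status="ready"`)
	} else {
		fmt.Println(`probe="readiness" status=""`)
	}
}

func main() {
	var g containerGate
	g.onStartupResult(false) // unhealthy, as for glance at 05:36:46
	g.onStartupResult(true)  // started
	g.onReadinessResult(true)
}
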
Need to start a new one" pod="openstack/placement-db-sync-vd5qz" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.658110 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bhfz\" (UniqueName: \"kubernetes.io/projected/81f9b3f5-db07-49ef-933f-ef90f1c017f6-kube-api-access-8bhfz\") pod \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\" (UID: \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.658233 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-combined-ca-bundle\") pod \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.658257 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f627g\" (UniqueName: \"kubernetes.io/projected/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-kube-api-access-f627g\") pod \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.658341 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-fernet-keys\") pod \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.658365 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-scripts\") pod \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.658436 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-config-data\") pod \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.658453 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-credential-keys\") pod \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\" (UID: \"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.658469 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9b3f5-db07-49ef-933f-ef90f1c017f6-combined-ca-bundle\") pod \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\" (UID: \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.658526 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81f9b3f5-db07-49ef-933f-ef90f1c017f6-config-data\") pod \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\" (UID: \"81f9b3f5-db07-49ef-933f-ef90f1c017f6\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.687342 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-kube-api-access-f627g" (OuterVolumeSpecName: "kube-api-access-f627g") pod 
"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3" (UID: "aad42960-bd7f-4a6a-9d2b-74cf1b7084a3"). InnerVolumeSpecName "kube-api-access-f627g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.699092 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "aad42960-bd7f-4a6a-9d2b-74cf1b7084a3" (UID: "aad42960-bd7f-4a6a-9d2b-74cf1b7084a3"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.699190 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81f9b3f5-db07-49ef-933f-ef90f1c017f6-kube-api-access-8bhfz" (OuterVolumeSpecName: "kube-api-access-8bhfz") pod "81f9b3f5-db07-49ef-933f-ef90f1c017f6" (UID: "81f9b3f5-db07-49ef-933f-ef90f1c017f6"). InnerVolumeSpecName "kube-api-access-8bhfz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.699353 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-scripts" (OuterVolumeSpecName: "scripts") pod "aad42960-bd7f-4a6a-9d2b-74cf1b7084a3" (UID: "aad42960-bd7f-4a6a-9d2b-74cf1b7084a3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.699752 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "aad42960-bd7f-4a6a-9d2b-74cf1b7084a3" (UID: "aad42960-bd7f-4a6a-9d2b-74cf1b7084a3"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.744620 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aad42960-bd7f-4a6a-9d2b-74cf1b7084a3" (UID: "aad42960-bd7f-4a6a-9d2b-74cf1b7084a3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.750095 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-config-data" (OuterVolumeSpecName: "config-data") pod "aad42960-bd7f-4a6a-9d2b-74cf1b7084a3" (UID: "aad42960-bd7f-4a6a-9d2b-74cf1b7084a3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.760477 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/458612b9-c230-4db2-82d3-0a1b8fbe81f1-logs\") pod \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.760583 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-scripts\") pod \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.760663 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdsgh\" (UniqueName: \"kubernetes.io/projected/458612b9-c230-4db2-82d3-0a1b8fbe81f1-kube-api-access-zdsgh\") pod \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.760763 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-config-data\") pod \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.760814 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-combined-ca-bundle\") pod \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\" (UID: \"458612b9-c230-4db2-82d3-0a1b8fbe81f1\") " Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.761143 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.761159 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.761168 4814 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.761177 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bhfz\" (UniqueName: \"kubernetes.io/projected/81f9b3f5-db07-49ef-933f-ef90f1c017f6-kube-api-access-8bhfz\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.761185 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.761194 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f627g\" (UniqueName: \"kubernetes.io/projected/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-kube-api-access-f627g\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.761202 4814 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" 
(UniqueName: \"kubernetes.io/secret/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.763132 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/458612b9-c230-4db2-82d3-0a1b8fbe81f1-logs" (OuterVolumeSpecName: "logs") pod "458612b9-c230-4db2-82d3-0a1b8fbe81f1" (UID: "458612b9-c230-4db2-82d3-0a1b8fbe81f1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.775347 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-scripts" (OuterVolumeSpecName: "scripts") pod "458612b9-c230-4db2-82d3-0a1b8fbe81f1" (UID: "458612b9-c230-4db2-82d3-0a1b8fbe81f1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.776834 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81f9b3f5-db07-49ef-933f-ef90f1c017f6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "81f9b3f5-db07-49ef-933f-ef90f1c017f6" (UID: "81f9b3f5-db07-49ef-933f-ef90f1c017f6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.779859 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/458612b9-c230-4db2-82d3-0a1b8fbe81f1-kube-api-access-zdsgh" (OuterVolumeSpecName: "kube-api-access-zdsgh") pod "458612b9-c230-4db2-82d3-0a1b8fbe81f1" (UID: "458612b9-c230-4db2-82d3-0a1b8fbe81f1"). InnerVolumeSpecName "kube-api-access-zdsgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.818343 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "458612b9-c230-4db2-82d3-0a1b8fbe81f1" (UID: "458612b9-c230-4db2-82d3-0a1b8fbe81f1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.831097 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81f9b3f5-db07-49ef-933f-ef90f1c017f6-config-data" (OuterVolumeSpecName: "config-data") pod "81f9b3f5-db07-49ef-933f-ef90f1c017f6" (UID: "81f9b3f5-db07-49ef-933f-ef90f1c017f6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.855901 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-config-data" (OuterVolumeSpecName: "config-data") pod "458612b9-c230-4db2-82d3-0a1b8fbe81f1" (UID: "458612b9-c230-4db2-82d3-0a1b8fbe81f1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.863975 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.864010 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.864021 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/458612b9-c230-4db2-82d3-0a1b8fbe81f1-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.864029 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81f9b3f5-db07-49ef-933f-ef90f1c017f6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.864037 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/458612b9-c230-4db2-82d3-0a1b8fbe81f1-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.864045 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81f9b3f5-db07-49ef-933f-ef90f1c017f6-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:48 crc kubenswrapper[4814]: I0122 05:36:48.864053 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zdsgh\" (UniqueName: \"kubernetes.io/projected/458612b9-c230-4db2-82d3-0a1b8fbe81f1-kube-api-access-zdsgh\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.319248 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-n8qnk" event={"ID":"81f9b3f5-db07-49ef-933f-ef90f1c017f6","Type":"ContainerDied","Data":"309f30eb3ec2d57289e1532f93129bcbb0e9921deae5b72eebfe67ca7739fed2"} Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.319465 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="309f30eb3ec2d57289e1532f93129bcbb0e9921deae5b72eebfe67ca7739fed2" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.319292 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-n8qnk" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.321353 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fvkql" event={"ID":"aad42960-bd7f-4a6a-9d2b-74cf1b7084a3","Type":"ContainerDied","Data":"2a65d47642a96e9a7038b5cb62fbea2ae979d958ab1b32882439cf907746be0c"} Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.321394 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a65d47642a96e9a7038b5cb62fbea2ae979d958ab1b32882439cf907746be0c" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.321468 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-fvkql" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.325434 4814 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.325458 4814 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.325456 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-vd5qz" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.325423 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-vd5qz" event={"ID":"458612b9-c230-4db2-82d3-0a1b8fbe81f1","Type":"ContainerDied","Data":"e4d4d5bab8fb53cf3251c5fde6a37fa497a2b849e55fa0d5ba8567c28399c67e"} Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.325555 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4d4d5bab8fb53cf3251c5fde6a37fa497a2b849e55fa0d5ba8567c28399c67e" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.613889 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.613962 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.733285 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-b696dd656-jb7qd"] Jan 22 05:36:49 crc kubenswrapper[4814]: E0122 05:36:49.733612 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="458612b9-c230-4db2-82d3-0a1b8fbe81f1" containerName="placement-db-sync" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.733640 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="458612b9-c230-4db2-82d3-0a1b8fbe81f1" containerName="placement-db-sync" Jan 22 05:36:49 crc kubenswrapper[4814]: E0122 05:36:49.733663 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aad42960-bd7f-4a6a-9d2b-74cf1b7084a3" containerName="keystone-bootstrap" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.733670 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="aad42960-bd7f-4a6a-9d2b-74cf1b7084a3" containerName="keystone-bootstrap" Jan 22 05:36:49 crc kubenswrapper[4814]: E0122 05:36:49.733682 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81f9b3f5-db07-49ef-933f-ef90f1c017f6" containerName="heat-db-sync" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.733688 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="81f9b3f5-db07-49ef-933f-ef90f1c017f6" containerName="heat-db-sync" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.733866 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="aad42960-bd7f-4a6a-9d2b-74cf1b7084a3" containerName="keystone-bootstrap" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.733901 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="81f9b3f5-db07-49ef-933f-ef90f1c017f6" 
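
The cpu_manager/state_mem/memory_manager "RemoveStaleState" run above fires on admission of a new pod: before computing assignments for placement-b696dd656-jb7qd, both resource managers drop the CPUSet and memory-state entries still recorded for the just-completed db-sync and bootstrap containers. A toy version of that cleanup over a per-(podUID, container) assignment map; the types and function are invented for illustration:

package main

import "fmt"

type key struct{ podUID, container string }

// removeStaleState drops resource assignments whose pod no longer
// exists, the way cpu_manager/memory_manager log "RemoveStaleState:
// removing container" before admitting a new pod. Toy version.
func removeStaleState(assignments map[key]string, activePods map[string]bool) {
	for k := range assignments {
		if !activePods[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n",
				k.podUID, k.container)
			delete(assignments, k)
		}
	}
}

func main() {
	assignments := map[key]string{
		{"458612b9", "placement-db-sync"}:  "cpuset 0-1",
		{"aad42960", "keystone-bootstrap"}: "cpuset 2",
	}
	removeStaleState(assignments, map[string]bool{"518356f1": true}) // only the new pod is active
	fmt.Println(len(assignments), "assignments remain")
}
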
containerName="heat-db-sync" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.733923 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="458612b9-c230-4db2-82d3-0a1b8fbe81f1" containerName="placement-db-sync" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.734817 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.737414 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.737506 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.737598 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.737699 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-r9hqf" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.737767 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.749114 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-b696dd656-jb7qd"] Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.829210 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7775c58c77-kjdcl"] Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.830216 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.832996 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.833868 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.834095 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.834119 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.836136 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8jbhz" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.836286 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.857669 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7775c58c77-kjdcl"] Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883027 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-combined-ca-bundle\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883058 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nh2q\" (UniqueName: 
\"kubernetes.io/projected/518356f1-b309-4878-ba09-d63f093a7151-kube-api-access-2nh2q\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883078 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-combined-ca-bundle\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883110 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-public-tls-certs\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883135 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-internal-tls-certs\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883159 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/518356f1-b309-4878-ba09-d63f093a7151-logs\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883177 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-public-tls-certs\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883199 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5kl9\" (UniqueName: \"kubernetes.io/projected/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-kube-api-access-c5kl9\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883218 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-config-data\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883231 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-internal-tls-certs\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883260 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-scripts\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883275 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-credential-keys\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883292 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-fernet-keys\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883320 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-config-data\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.883343 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-scripts\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984615 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-combined-ca-bundle\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984676 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-combined-ca-bundle\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984696 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nh2q\" (UniqueName: \"kubernetes.io/projected/518356f1-b309-4878-ba09-d63f093a7151-kube-api-access-2nh2q\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984731 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-public-tls-certs\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984757 4814 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-internal-tls-certs\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984786 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/518356f1-b309-4878-ba09-d63f093a7151-logs\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984804 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-public-tls-certs\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984828 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5kl9\" (UniqueName: \"kubernetes.io/projected/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-kube-api-access-c5kl9\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984847 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-config-data\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984865 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-internal-tls-certs\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984895 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-scripts\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984912 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-credential-keys\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984928 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-fernet-keys\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984961 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-config-data\") pod 
\"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.984985 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-scripts\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.989264 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/518356f1-b309-4878-ba09-d63f093a7151-logs\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.992750 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-combined-ca-bundle\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.996995 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-public-tls-certs\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.997338 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-credential-keys\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:49 crc kubenswrapper[4814]: I0122 05:36:49.997403 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-internal-tls-certs\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:49.998351 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-internal-tls-certs\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:49.998684 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-combined-ca-bundle\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.001006 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-public-tls-certs\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.001209 
4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-scripts\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.008968 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-scripts\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.013264 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5kl9\" (UniqueName: \"kubernetes.io/projected/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-kube-api-access-c5kl9\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.018056 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-config-data\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.018485 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nh2q\" (UniqueName: \"kubernetes.io/projected/518356f1-b309-4878-ba09-d63f093a7151-kube-api-access-2nh2q\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.018720 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/518356f1-b309-4878-ba09-d63f093a7151-config-data\") pod \"placement-b696dd656-jb7qd\" (UID: \"518356f1-b309-4878-ba09-d63f093a7151\") " pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.029204 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/076cdf6e-59ca-4712-8e15-88fb2e90f5e8-fernet-keys\") pod \"keystone-7775c58c77-kjdcl\" (UID: \"076cdf6e-59ca-4712-8e15-88fb2e90f5e8\") " pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.051541 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.148068 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.914819 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.915183 4814 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.917873 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.918347 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.918474 4814 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 05:36:50 crc kubenswrapper[4814]: I0122 05:36:50.942279 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.102607 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fc797bd5d-f6wlm" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.287056 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-75cf549f68-bs2gm" podUID="d7073bb4-1466-4fe6-bb49-f91bbee77dbd" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.404949 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" event={"ID":"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d","Type":"ContainerDied","Data":"a178339288e62da6744186708dbb26a2d407dc5bef5466e7d5f6f9601e676ef0"} Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.404994 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a178339288e62da6744186708dbb26a2d407dc5bef5466e7d5f6f9601e676ef0" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.444075 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.454741 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-ovsdbserver-nb\") pod \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.454803 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z527t\" (UniqueName: \"kubernetes.io/projected/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-kube-api-access-z527t\") pod \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.454848 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-dns-svc\") pod \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.454874 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-ovsdbserver-sb\") pod \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.454929 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-config\") pod \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.454959 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-dns-swift-storage-0\") pod \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\" (UID: \"c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d\") " Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.512760 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-kube-api-access-z527t" (OuterVolumeSpecName: "kube-api-access-z527t") pod "c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" (UID: "c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d"). InnerVolumeSpecName "kube-api-access-z527t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.558866 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z527t\" (UniqueName: \"kubernetes.io/projected/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-kube-api-access-z527t\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.603847 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-config" (OuterVolumeSpecName: "config") pod "c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" (UID: "c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.610403 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" (UID: "c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.612409 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" (UID: "c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.614050 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" (UID: "c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.617875 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" (UID: "c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.660428 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.660455 4814 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.660466 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.660476 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.660487 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.846500 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-b696dd656-jb7qd"] Jan 22 05:36:53 crc kubenswrapper[4814]: I0122 05:36:53.922516 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7775c58c77-kjdcl"] Jan 22 05:36:53 crc kubenswrapper[4814]: W0122 05:36:53.926903 4814 manager.go:1169] Failed to 
process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod076cdf6e_59ca_4712_8e15_88fb2e90f5e8.slice/crio-e5b88f45e48f37dc00034d87489aaf38361614c1bdf88074c3d86857657cc157 WatchSource:0}: Error finding container e5b88f45e48f37dc00034d87489aaf38361614c1bdf88074c3d86857657cc157: Status 404 returned error can't find the container with id e5b88f45e48f37dc00034d87489aaf38361614c1bdf88074c3d86857657cc157 Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.176334 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" podUID="c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: i/o timeout" Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.414956 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b696dd656-jb7qd" event={"ID":"518356f1-b309-4878-ba09-d63f093a7151","Type":"ContainerStarted","Data":"bfaf8ca570acdb712e07626ee3aba033adc7bdf57e43532dd195608a1cdbb513"} Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.415267 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b696dd656-jb7qd" event={"ID":"518356f1-b309-4878-ba09-d63f093a7151","Type":"ContainerStarted","Data":"ac952141f1cfe36776ff5f8fd990e49d9552ca79528ad8145c0f5c11050fb804"} Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.415277 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b696dd656-jb7qd" event={"ID":"518356f1-b309-4878-ba09-d63f093a7151","Type":"ContainerStarted","Data":"580641f8b45bb30290bec6b9c774560c27ca2266c4bf96ab61eb1d977e59d139"} Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.416354 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.416380 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.419400 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd231434-0d02-4b13-9a72-c31277deeacf","Type":"ContainerStarted","Data":"78f20c18a7f1881091b952a9a8094d8739980170df3e12e508de878662b54369"} Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.421315 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7775c58c77-kjdcl" event={"ID":"076cdf6e-59ca-4712-8e15-88fb2e90f5e8","Type":"ContainerStarted","Data":"f327b5e626112e6b57a32bb4200ba04f7d6bb02b549a3a29ad7625548e9ba164"} Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.421338 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7775c58c77-kjdcl" event={"ID":"076cdf6e-59ca-4712-8e15-88fb2e90f5e8","Type":"ContainerStarted","Data":"e5b88f45e48f37dc00034d87489aaf38361614c1bdf88074c3d86857657cc157"} Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.421783 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.423065 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-nzwkx" Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.423493 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-rfq2m" event={"ID":"66536b14-9f47-4fe1-bc77-583a4ffff700","Type":"ContainerStarted","Data":"3063722d9c63a1df0619c8dca81b89c685f38221550c110561ad2f90e7bbed23"} Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.435595 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-b696dd656-jb7qd" podStartSLOduration=5.435577625 podStartE2EDuration="5.435577625s" podCreationTimestamp="2026-01-22 05:36:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:36:54.434400498 +0000 UTC m=+1100.517888713" watchObservedRunningTime="2026-01-22 05:36:54.435577625 +0000 UTC m=+1100.519065840" Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.451602 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7775c58c77-kjdcl" podStartSLOduration=5.4515859110000005 podStartE2EDuration="5.451585911s" podCreationTimestamp="2026-01-22 05:36:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:36:54.451114926 +0000 UTC m=+1100.534603141" watchObservedRunningTime="2026-01-22 05:36:54.451585911 +0000 UTC m=+1100.535074126" Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.488115 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-nzwkx"] Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.495991 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-nzwkx"] Jan 22 05:36:54 crc kubenswrapper[4814]: I0122 05:36:54.501052 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-rfq2m" podStartSLOduration=3.604404297 podStartE2EDuration="1m1.501014522s" podCreationTimestamp="2026-01-22 05:35:53 +0000 UTC" firstStartedPulling="2026-01-22 05:35:55.573289583 +0000 UTC m=+1041.656777788" lastFinishedPulling="2026-01-22 05:36:53.469899798 +0000 UTC m=+1099.553388013" observedRunningTime="2026-01-22 05:36:54.480104645 +0000 UTC m=+1100.563592860" watchObservedRunningTime="2026-01-22 05:36:54.501014522 +0000 UTC m=+1100.584502737" Jan 22 05:36:55 crc kubenswrapper[4814]: I0122 05:36:55.435048 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-mk8qf" event={"ID":"6af8690c-751e-4196-b6f4-db21950c5ec7","Type":"ContainerStarted","Data":"886b309406e11939d37f7f3b6d5f89b974eb0dfda41f0db283ab48142dede4ce"} Jan 22 05:36:55 crc kubenswrapper[4814]: I0122 05:36:55.458444 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-mk8qf" podStartSLOduration=5.073891568 podStartE2EDuration="1m3.458424763s" podCreationTimestamp="2026-01-22 05:35:52 +0000 UTC" firstStartedPulling="2026-01-22 05:35:55.088267903 +0000 UTC m=+1041.171756118" lastFinishedPulling="2026-01-22 05:36:53.472801098 +0000 UTC m=+1099.556289313" observedRunningTime="2026-01-22 05:36:55.450925361 +0000 UTC m=+1101.534413576" watchObservedRunningTime="2026-01-22 05:36:55.458424763 +0000 UTC m=+1101.541912978" Jan 22 05:36:56 crc kubenswrapper[4814]: I0122 05:36:56.354083 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" path="/var/lib/kubelet/pods/c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d/volumes" Jan 22 05:36:57 crc kubenswrapper[4814]: I0122 05:36:57.460574 4814 generic.go:334] "Generic (PLEG): container finished" podID="66536b14-9f47-4fe1-bc77-583a4ffff700" containerID="3063722d9c63a1df0619c8dca81b89c685f38221550c110561ad2f90e7bbed23" exitCode=0 Jan 22 05:36:57 crc kubenswrapper[4814]: I0122 05:36:57.460784 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-rfq2m" event={"ID":"66536b14-9f47-4fe1-bc77-583a4ffff700","Type":"ContainerDied","Data":"3063722d9c63a1df0619c8dca81b89c685f38221550c110561ad2f90e7bbed23"} Jan 22 05:37:00 crc kubenswrapper[4814]: I0122 05:37:00.683199 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-rfq2m" Jan 22 05:37:00 crc kubenswrapper[4814]: I0122 05:37:00.799817 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/66536b14-9f47-4fe1-bc77-583a4ffff700-db-sync-config-data\") pod \"66536b14-9f47-4fe1-bc77-583a4ffff700\" (UID: \"66536b14-9f47-4fe1-bc77-583a4ffff700\") " Jan 22 05:37:00 crc kubenswrapper[4814]: I0122 05:37:00.799869 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66536b14-9f47-4fe1-bc77-583a4ffff700-combined-ca-bundle\") pod \"66536b14-9f47-4fe1-bc77-583a4ffff700\" (UID: \"66536b14-9f47-4fe1-bc77-583a4ffff700\") " Jan 22 05:37:00 crc kubenswrapper[4814]: I0122 05:37:00.799898 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w78nl\" (UniqueName: \"kubernetes.io/projected/66536b14-9f47-4fe1-bc77-583a4ffff700-kube-api-access-w78nl\") pod \"66536b14-9f47-4fe1-bc77-583a4ffff700\" (UID: \"66536b14-9f47-4fe1-bc77-583a4ffff700\") " Jan 22 05:37:00 crc kubenswrapper[4814]: I0122 05:37:00.806001 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66536b14-9f47-4fe1-bc77-583a4ffff700-kube-api-access-w78nl" (OuterVolumeSpecName: "kube-api-access-w78nl") pod "66536b14-9f47-4fe1-bc77-583a4ffff700" (UID: "66536b14-9f47-4fe1-bc77-583a4ffff700"). InnerVolumeSpecName "kube-api-access-w78nl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:00 crc kubenswrapper[4814]: I0122 05:37:00.814739 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66536b14-9f47-4fe1-bc77-583a4ffff700-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "66536b14-9f47-4fe1-bc77-583a4ffff700" (UID: "66536b14-9f47-4fe1-bc77-583a4ffff700"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:00 crc kubenswrapper[4814]: I0122 05:37:00.836640 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66536b14-9f47-4fe1-bc77-583a4ffff700-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "66536b14-9f47-4fe1-bc77-583a4ffff700" (UID: "66536b14-9f47-4fe1-bc77-583a4ffff700"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:00 crc kubenswrapper[4814]: I0122 05:37:00.902055 4814 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/66536b14-9f47-4fe1-bc77-583a4ffff700-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:00 crc kubenswrapper[4814]: I0122 05:37:00.902087 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66536b14-9f47-4fe1-bc77-583a4ffff700-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:00 crc kubenswrapper[4814]: I0122 05:37:00.902096 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w78nl\" (UniqueName: \"kubernetes.io/projected/66536b14-9f47-4fe1-bc77-583a4ffff700-kube-api-access-w78nl\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:01 crc kubenswrapper[4814]: I0122 05:37:01.500510 4814 generic.go:334] "Generic (PLEG): container finished" podID="6af8690c-751e-4196-b6f4-db21950c5ec7" containerID="886b309406e11939d37f7f3b6d5f89b974eb0dfda41f0db283ab48142dede4ce" exitCode=0 Jan 22 05:37:01 crc kubenswrapper[4814]: I0122 05:37:01.500817 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-mk8qf" event={"ID":"6af8690c-751e-4196-b6f4-db21950c5ec7","Type":"ContainerDied","Data":"886b309406e11939d37f7f3b6d5f89b974eb0dfda41f0db283ab48142dede4ce"} Jan 22 05:37:01 crc kubenswrapper[4814]: I0122 05:37:01.503426 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-rfq2m" event={"ID":"66536b14-9f47-4fe1-bc77-583a4ffff700","Type":"ContainerDied","Data":"ed1a4542df49686e3ea3e305cfa24d4f33973ceadc34a65f26cee75557ed7a03"} Jan 22 05:37:01 crc kubenswrapper[4814]: I0122 05:37:01.503465 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed1a4542df49686e3ea3e305cfa24d4f33973ceadc34a65f26cee75557ed7a03" Jan 22 05:37:01 crc kubenswrapper[4814]: I0122 05:37:01.503520 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-rfq2m" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.007051 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-85674cd5c5-g47hl"] Jan 22 05:37:02 crc kubenswrapper[4814]: E0122 05:37:02.008239 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" containerName="init" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.008310 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" containerName="init" Jan 22 05:37:02 crc kubenswrapper[4814]: E0122 05:37:02.008373 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66536b14-9f47-4fe1-bc77-583a4ffff700" containerName="barbican-db-sync" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.008425 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="66536b14-9f47-4fe1-bc77-583a4ffff700" containerName="barbican-db-sync" Jan 22 05:37:02 crc kubenswrapper[4814]: E0122 05:37:02.008481 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" containerName="dnsmasq-dns" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.008532 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" containerName="dnsmasq-dns" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.008787 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="c38cefa8-b1e9-43fd-9e7c-ce6e90ba824d" containerName="dnsmasq-dns" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.008866 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="66536b14-9f47-4fe1-bc77-583a4ffff700" containerName="barbican-db-sync" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.009838 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.012345 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-85674cd5c5-g47hl"] Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.021054 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-dvlml" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.021352 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.021461 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.076050 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-cfcbfc454-bzmww"] Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.077499 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.082421 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.108156 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-cfcbfc454-bzmww"] Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.135738 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e32c5346-8a9c-41d1-9521-b6253cdc2250-combined-ca-bundle\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.135825 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e32c5346-8a9c-41d1-9521-b6253cdc2250-logs\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.135874 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pqx8\" (UniqueName: \"kubernetes.io/projected/e32c5346-8a9c-41d1-9521-b6253cdc2250-kube-api-access-9pqx8\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.135914 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e32c5346-8a9c-41d1-9521-b6253cdc2250-config-data-custom\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.135936 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e32c5346-8a9c-41d1-9521-b6253cdc2250-config-data\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.186821 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-9xnx9"] Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.188141 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.200162 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-9xnx9"] Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.238849 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-config-data-custom\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.238899 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e32c5346-8a9c-41d1-9521-b6253cdc2250-config-data-custom\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.238928 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e32c5346-8a9c-41d1-9521-b6253cdc2250-config-data\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.238954 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-combined-ca-bundle\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.238983 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-config-data\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.239001 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e32c5346-8a9c-41d1-9521-b6253cdc2250-combined-ca-bundle\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.239041 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtmz7\" (UniqueName: \"kubernetes.io/projected/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-kube-api-access-gtmz7\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.239075 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-logs\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " 
pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.239094 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e32c5346-8a9c-41d1-9521-b6253cdc2250-logs\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.239131 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pqx8\" (UniqueName: \"kubernetes.io/projected/e32c5346-8a9c-41d1-9521-b6253cdc2250-kube-api-access-9pqx8\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.239694 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-f6776c7f8-jxx4p"] Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.241972 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.242808 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e32c5346-8a9c-41d1-9521-b6253cdc2250-logs\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.250028 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.263401 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e32c5346-8a9c-41d1-9521-b6253cdc2250-combined-ca-bundle\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.266289 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e32c5346-8a9c-41d1-9521-b6253cdc2250-config-data-custom\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.278957 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e32c5346-8a9c-41d1-9521-b6253cdc2250-config-data\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.294394 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pqx8\" (UniqueName: \"kubernetes.io/projected/e32c5346-8a9c-41d1-9521-b6253cdc2250-kube-api-access-9pqx8\") pod \"barbican-worker-85674cd5c5-g47hl\" (UID: \"e32c5346-8a9c-41d1-9521-b6253cdc2250\") " pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.296897 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-f6776c7f8-jxx4p"] Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.340315 4814 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-config\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.340358 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qk4sl\" (UniqueName: \"kubernetes.io/projected/004c67d3-d7dc-49b4-853b-89753a6fc6b4-kube-api-access-qk4sl\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.340396 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.340440 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-config-data-custom\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.340483 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.340513 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-combined-ca-bundle\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.340555 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-config-data\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.340570 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.340586 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: 
\"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.340647 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtmz7\" (UniqueName: \"kubernetes.io/projected/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-kube-api-access-gtmz7\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.340680 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-logs\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.341105 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-logs\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.348298 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-config-data\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.351774 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-combined-ca-bundle\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.364798 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-config-data-custom\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.385321 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtmz7\" (UniqueName: \"kubernetes.io/projected/ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824-kube-api-access-gtmz7\") pod \"barbican-keystone-listener-cfcbfc454-bzmww\" (UID: \"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824\") " pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.409613 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-85674cd5c5-g47hl" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.415552 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.442195 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gx67f\" (UniqueName: \"kubernetes.io/projected/d00c25a0-75c3-4eb7-b258-634d56bb62ff-kube-api-access-gx67f\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.442289 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.442319 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.442413 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-config\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.442438 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-config-data\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.442463 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qk4sl\" (UniqueName: \"kubernetes.io/projected/004c67d3-d7dc-49b4-853b-89753a6fc6b4-kube-api-access-qk4sl\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.442490 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.442534 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-combined-ca-bundle\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.442559 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d00c25a0-75c3-4eb7-b258-634d56bb62ff-logs\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: 
\"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.442582 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-config-data-custom\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.442611 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.443616 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.444240 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.444463 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.445316 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-config\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.445996 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.462566 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qk4sl\" (UniqueName: \"kubernetes.io/projected/004c67d3-d7dc-49b4-853b-89753a6fc6b4-kube-api-access-qk4sl\") pod \"dnsmasq-dns-75c8ddd69c-9xnx9\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.514447 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.545576 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-config-data\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.545675 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-combined-ca-bundle\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.545697 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d00c25a0-75c3-4eb7-b258-634d56bb62ff-logs\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.545715 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-config-data-custom\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.545764 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gx67f\" (UniqueName: \"kubernetes.io/projected/d00c25a0-75c3-4eb7-b258-634d56bb62ff-kube-api-access-gx67f\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.546725 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d00c25a0-75c3-4eb7-b258-634d56bb62ff-logs\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.553907 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-config-data-custom\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.554007 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-combined-ca-bundle\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.556379 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-config-data\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc 
kubenswrapper[4814]: I0122 05:37:02.562193 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd231434-0d02-4b13-9a72-c31277deeacf","Type":"ContainerStarted","Data":"55a3b93b021e125ddecae24816013e653a86650f9e7849939871766040bdfcdf"} Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.562252 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="ceilometer-central-agent" containerID="cri-o://daa591d018eaab0582fdc764b67e1553c318f27f3d77a1cfa5bb0e21425c3007" gracePeriod=30 Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.562316 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="proxy-httpd" containerID="cri-o://55a3b93b021e125ddecae24816013e653a86650f9e7849939871766040bdfcdf" gracePeriod=30 Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.562347 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="sg-core" containerID="cri-o://78f20c18a7f1881091b952a9a8094d8739980170df3e12e508de878662b54369" gracePeriod=30 Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.562378 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="ceilometer-notification-agent" containerID="cri-o://9b7f46b5460c1ad190ffa4151b61b12d2b69ae28f3095f159aab9823219ef863" gracePeriod=30 Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.575471 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gx67f\" (UniqueName: \"kubernetes.io/projected/d00c25a0-75c3-4eb7-b258-634d56bb62ff-kube-api-access-gx67f\") pod \"barbican-api-f6776c7f8-jxx4p\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.591485 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.303720842 podStartE2EDuration="1m9.591466699s" podCreationTimestamp="2026-01-22 05:35:53 +0000 UTC" firstStartedPulling="2026-01-22 05:35:55.572985974 +0000 UTC m=+1041.656474189" lastFinishedPulling="2026-01-22 05:37:01.860731831 +0000 UTC m=+1107.944220046" observedRunningTime="2026-01-22 05:37:02.582514607 +0000 UTC m=+1108.666002822" watchObservedRunningTime="2026-01-22 05:37:02.591466699 +0000 UTC m=+1108.674954914" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.669945 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:02 crc kubenswrapper[4814]: I0122 05:37:02.959703 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-85674cd5c5-g47hl"] Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.101047 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fc797bd5d-f6wlm" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.140953 4814 util.go:48] "No ready sandbox for pod can be found. 
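Two things stand out in this burst. First, ceilometer-0 is reported started and, in the same instant, all four of its containers are ordered killed with gracePeriod=30; the pod reached Running just as its replacement cycle began (the delete and re-add appear further down). Second, the startup-latency record explains the slow start: podStartE2EDuration is 1m9.59s, but most of that is the image-pull window (firstStartedPulling 05:35:55.57 to lastFinishedPulling 05:37:01.86), and by the numbers in this record podStartSLOduration is exactly the end-to-end duration minus that window. A quick check with values copied from the log (the helper is illustrative):

    from datetime import datetime

    def ts(s):
        # klog prints nanoseconds; trim to microseconds for strptime
        s = s.replace(" +0000 UTC", "")
        if "." in s:
            head, frac = s.split(".")
            return datetime.strptime(f"{head}.{frac[:6]}", "%Y-%m-%d %H:%M:%S.%f")
        return datetime.strptime(s, "%Y-%m-%d %H:%M:%S")

    created = ts("2026-01-22 05:35:53 +0000 UTC")            # podCreationTimestamp
    pull_a  = ts("2026-01-22 05:35:55.572985974 +0000 UTC")  # firstStartedPulling
    pull_b  = ts("2026-01-22 05:37:01.860731831 +0000 UTC")  # lastFinishedPulling
    running = ts("2026-01-22 05:37:02.591466699 +0000 UTC")  # watchObservedRunningTime
    e2e = (running - created).total_seconds()      # ~69.591466s, i.e. "1m9.591466699s"
    slo = e2e - (pull_b - pull_a).total_seconds()  # ~3.303721s, matches podStartSLOduration
    print(f"e2e={e2e:.6f}s slo={slo:.6f}s")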
Need to start a new one" pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.196816 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-cfcbfc454-bzmww"] Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.220547 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-9xnx9"] Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.262707 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vcwpn\" (UniqueName: \"kubernetes.io/projected/6af8690c-751e-4196-b6f4-db21950c5ec7-kube-api-access-vcwpn\") pod \"6af8690c-751e-4196-b6f4-db21950c5ec7\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.262800 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-combined-ca-bundle\") pod \"6af8690c-751e-4196-b6f4-db21950c5ec7\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.262901 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-scripts\") pod \"6af8690c-751e-4196-b6f4-db21950c5ec7\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.263033 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-config-data\") pod \"6af8690c-751e-4196-b6f4-db21950c5ec7\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.263063 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-db-sync-config-data\") pod \"6af8690c-751e-4196-b6f4-db21950c5ec7\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.263095 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6af8690c-751e-4196-b6f4-db21950c5ec7-etc-machine-id\") pod \"6af8690c-751e-4196-b6f4-db21950c5ec7\" (UID: \"6af8690c-751e-4196-b6f4-db21950c5ec7\") " Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.264244 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6af8690c-751e-4196-b6f4-db21950c5ec7-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "6af8690c-751e-4196-b6f4-db21950c5ec7" (UID: "6af8690c-751e-4196-b6f4-db21950c5ec7"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.267094 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6af8690c-751e-4196-b6f4-db21950c5ec7-kube-api-access-vcwpn" (OuterVolumeSpecName: "kube-api-access-vcwpn") pod "6af8690c-751e-4196-b6f4-db21950c5ec7" (UID: "6af8690c-751e-4196-b6f4-db21950c5ec7"). InnerVolumeSpecName "kube-api-access-vcwpn". 
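The "Probe failed" records just above and below are startup probes for the two horizon pods, both refusing connections on 8443. That is expected while a container is still initializing: a startup probe gates the liveness and readiness probes until it first succeeds, and only exhausting its failure threshold restarts the container. A sketch that pulls every probe failure out of a capture like this one (file name assumed, regex illustrative):

    import re

    text = open("kubelet.log").read()
    pat = re.compile(
        r'"Probe failed" probeType="([^"]+)" pod="([^"]+)".*?'
        r'output="((?:[^"\\]|\\.)*)"', re.S)
    for ptype, pod, out in pat.findall(text):
        print(ptype, pod, "->", out[:72])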
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.269958 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-scripts" (OuterVolumeSpecName: "scripts") pod "6af8690c-751e-4196-b6f4-db21950c5ec7" (UID: "6af8690c-751e-4196-b6f4-db21950c5ec7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.272206 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "6af8690c-751e-4196-b6f4-db21950c5ec7" (UID: "6af8690c-751e-4196-b6f4-db21950c5ec7"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.286356 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-75cf549f68-bs2gm" podUID="d7073bb4-1466-4fe6-bb49-f91bbee77dbd" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.152:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.152:8443: connect: connection refused" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.289964 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6af8690c-751e-4196-b6f4-db21950c5ec7" (UID: "6af8690c-751e-4196-b6f4-db21950c5ec7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.321681 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-config-data" (OuterVolumeSpecName: "config-data") pod "6af8690c-751e-4196-b6f4-db21950c5ec7" (UID: "6af8690c-751e-4196-b6f4-db21950c5ec7"). InnerVolumeSpecName "config-data". 
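Teardown for the finished cinder-db-sync pod mirrors setup: "UnmountVolume started" per volume, "UnmountVolume.TearDown succeeded" from operation_generator.go, and then, just below, one "Volume detached ... DevicePath \"\"" line per volume from reconciler_common.go:293. The empty DevicePath is what you would expect for secret, projected, and host-path volumes, which have no block device to detach. A cross-check that every unmount reached the detached state (same file-name assumption; the \s+ tolerates records wrapped across lines in this capture):

    import re

    text = open("kubelet.log").read()
    unmounted = set(re.findall(
        r'UnmountVolume started for volume \\"([^"\\]+)\\"\s+'
        r'\(UniqueName:\s+\\"([^"\\]+)\\"', text))
    detached = set(re.findall(
        r'Volume detached for volume \\"([^"\\]+)\\"\s+'
        r'\(UniqueName:\s+\\"([^"\\]+)\\"', text))
    for vol, uid in sorted(unmounted - detached):
        print("never detached:", vol, uid)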
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.365434 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.365651 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.365661 4814 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.365671 4814 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6af8690c-751e-4196-b6f4-db21950c5ec7-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.365680 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vcwpn\" (UniqueName: \"kubernetes.io/projected/6af8690c-751e-4196-b6f4-db21950c5ec7-kube-api-access-vcwpn\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.365688 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6af8690c-751e-4196-b6f4-db21950c5ec7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.460598 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-f6776c7f8-jxx4p"] Jan 22 05:37:03 crc kubenswrapper[4814]: W0122 05:37:03.476742 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd00c25a0_75c3_4eb7_b258_634d56bb62ff.slice/crio-5b599705cb17f5b38aa279df8a192fe4fc676e9a0738df324e7db3cf2045cde6 WatchSource:0}: Error finding container 5b599705cb17f5b38aa279df8a192fe4fc676e9a0738df324e7db3cf2045cde6: Status 404 returned error can't find the container with id 5b599705cb17f5b38aa279df8a192fe4fc676e9a0738df324e7db3cf2045cde6 Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.572902 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-mk8qf" event={"ID":"6af8690c-751e-4196-b6f4-db21950c5ec7","Type":"ContainerDied","Data":"83f451f60aecdd9e05226492e9df2bc9c22cda5197b753fae2a9eded0a67df76"} Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.572930 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-mk8qf" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.572943 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83f451f60aecdd9e05226492e9df2bc9c22cda5197b753fae2a9eded0a67df76" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.574115 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f6776c7f8-jxx4p" event={"ID":"d00c25a0-75c3-4eb7-b258-634d56bb62ff","Type":"ContainerStarted","Data":"5b599705cb17f5b38aa279df8a192fe4fc676e9a0738df324e7db3cf2045cde6"} Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.575774 4814 generic.go:334] "Generic (PLEG): container finished" podID="004c67d3-d7dc-49b4-853b-89753a6fc6b4" containerID="e0bfe579314b8fc7b9d41603be27e4ef6dc68828bc0d8f2681d8553713ae295f" exitCode=0 Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.575847 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" event={"ID":"004c67d3-d7dc-49b4-853b-89753a6fc6b4","Type":"ContainerDied","Data":"e0bfe579314b8fc7b9d41603be27e4ef6dc68828bc0d8f2681d8553713ae295f"} Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.575872 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" event={"ID":"004c67d3-d7dc-49b4-853b-89753a6fc6b4","Type":"ContainerStarted","Data":"d84888575455f9be25ee5d9160753b932756c7218e67796ea5f0a325f6a3676b"} Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.580990 4814 generic.go:334] "Generic (PLEG): container finished" podID="bd231434-0d02-4b13-9a72-c31277deeacf" containerID="55a3b93b021e125ddecae24816013e653a86650f9e7849939871766040bdfcdf" exitCode=0 Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.581016 4814 generic.go:334] "Generic (PLEG): container finished" podID="bd231434-0d02-4b13-9a72-c31277deeacf" containerID="78f20c18a7f1881091b952a9a8094d8739980170df3e12e508de878662b54369" exitCode=2 Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.581023 4814 generic.go:334] "Generic (PLEG): container finished" podID="bd231434-0d02-4b13-9a72-c31277deeacf" containerID="daa591d018eaab0582fdc764b67e1553c318f27f3d77a1cfa5bb0e21425c3007" exitCode=0 Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.581062 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd231434-0d02-4b13-9a72-c31277deeacf","Type":"ContainerDied","Data":"55a3b93b021e125ddecae24816013e653a86650f9e7849939871766040bdfcdf"} Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.581086 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd231434-0d02-4b13-9a72-c31277deeacf","Type":"ContainerDied","Data":"78f20c18a7f1881091b952a9a8094d8739980170df3e12e508de878662b54369"} Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.581095 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd231434-0d02-4b13-9a72-c31277deeacf","Type":"ContainerDied","Data":"daa591d018eaab0582fdc764b67e1553c318f27f3d77a1cfa5bb0e21425c3007"} Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.582248 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" event={"ID":"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824","Type":"ContainerStarted","Data":"8994836fdae22e020d6278b13e440462b71273e995c008bc9030cd462b4e7f65"} Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.583270 4814 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-85674cd5c5-g47hl" event={"ID":"e32c5346-8a9c-41d1-9521-b6253cdc2250","Type":"ContainerStarted","Data":"b0cc457711c2d27559ca7cbd3cac3b2babf69a4e5035360798f87d30178faff1"} Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.730351 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 05:37:03 crc kubenswrapper[4814]: E0122 05:37:03.730744 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6af8690c-751e-4196-b6f4-db21950c5ec7" containerName="cinder-db-sync" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.730760 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="6af8690c-751e-4196-b6f4-db21950c5ec7" containerName="cinder-db-sync" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.730936 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="6af8690c-751e-4196-b6f4-db21950c5ec7" containerName="cinder-db-sync" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.731770 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.736423 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.739077 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-b7c9r" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.739257 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.739370 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.776700 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.863728 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-9xnx9"] Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.876330 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wm92g\" (UniqueName: \"kubernetes.io/projected/02168312-40cd-4a3a-966c-49e8347c8459-kube-api-access-wm92g\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.876375 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-scripts\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.876421 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.876450 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-config-data\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.876490 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02168312-40cd-4a3a-966c-49e8347c8459-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.876546 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.940605 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-5hll9"] Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.942137 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.983788 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wm92g\" (UniqueName: \"kubernetes.io/projected/02168312-40cd-4a3a-966c-49e8347c8459-kube-api-access-wm92g\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.983917 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-scripts\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.983995 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.984036 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-config-data\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.984060 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02168312-40cd-4a3a-966c-49e8347c8459-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.984116 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:03 crc 
kubenswrapper[4814]: I0122 05:37:03.988878 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-5hll9"] Jan 22 05:37:03 crc kubenswrapper[4814]: I0122 05:37:03.990558 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02168312-40cd-4a3a-966c-49e8347c8459-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.004478 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-scripts\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.026676 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wm92g\" (UniqueName: \"kubernetes.io/projected/02168312-40cd-4a3a-966c-49e8347c8459-kube-api-access-wm92g\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.064860 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.065957 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.071918 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-config-data\") pod \"cinder-scheduler-0\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.085207 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.085253 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.085272 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 
05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.085307 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-dns-svc\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.085325 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-config\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.085353 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljxts\" (UniqueName: \"kubernetes.io/projected/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-kube-api-access-ljxts\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.180393 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.181834 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.185515 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.186980 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.187011 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.187029 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.187066 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-config\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.187081 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-dns-svc\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: 
\"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.187108 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljxts\" (UniqueName: \"kubernetes.io/projected/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-kube-api-access-ljxts\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.188039 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.188340 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-config\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.188887 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-dns-svc\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.189203 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.189525 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.203640 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.221395 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljxts\" (UniqueName: \"kubernetes.io/projected/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-kube-api-access-ljxts\") pod \"dnsmasq-dns-5784cf869f-5hll9\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.288557 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-scripts\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.288616 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.288660 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-295sh\" (UniqueName: \"kubernetes.io/projected/814cb7df-caa1-49f3-a26a-7aea04b643e8-kube-api-access-295sh\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.288676 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/814cb7df-caa1-49f3-a26a-7aea04b643e8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.288716 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/814cb7df-caa1-49f3-a26a-7aea04b643e8-logs\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.288750 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-config-data\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.288801 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-config-data-custom\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.367412 4814 util.go:30] "No sandbox for pod can be found. 
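Around these records the dnsmasq deployment is churning: dnsmasq-dns-75c8ddd69c-9xnx9 was deleted from the API at 05:37:03.86, roughly a second after its volumes mounted, and its replacement dnsmasq-dns-5784cf869f-5hll9 was added at 05:37:03.94; further down, the old pod still passes its readiness probe before its dnsmasq-dns container is killed with gracePeriod=10. This looks like back-to-back configuration rollouts of the same deployment, though the log alone cannot confirm the trigger. Reconstructing a per-pod timeline from the PLEG events makes such sequences easier to follow (file name assumed; \s+ after the header tolerates wrapped records):

    import re

    text = open("kubelet.log").read()
    pat = re.compile(
        r'I\d{4} (\d{2}:\d{2}:\d{2}\.\d{6}) \d+ \S+\s+'
        r'"SyncLoop \(PLEG\): event for pod" pod="([^"]+)" '
        r'event={"ID":"[^"]+","Type":"([^"]+)","Data":"([^"]+)"}')
    for when, pod, kind, data in sorted(pat.findall(text)):
        print(when, pod, kind, data[:13])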
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.393836 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-config-data\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.393889 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-config-data-custom\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.393941 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-scripts\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.393974 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.394002 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/814cb7df-caa1-49f3-a26a-7aea04b643e8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.394018 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-295sh\" (UniqueName: \"kubernetes.io/projected/814cb7df-caa1-49f3-a26a-7aea04b643e8-kube-api-access-295sh\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.394054 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/814cb7df-caa1-49f3-a26a-7aea04b643e8-logs\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.394454 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/814cb7df-caa1-49f3-a26a-7aea04b643e8-logs\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.395336 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/814cb7df-caa1-49f3-a26a-7aea04b643e8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.405015 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-scripts\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 
05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.413111 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.420569 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-config-data-custom\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.426614 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-config-data\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.427817 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-295sh\" (UniqueName: \"kubernetes.io/projected/814cb7df-caa1-49f3-a26a-7aea04b643e8-kube-api-access-295sh\") pod \"cinder-api-0\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.476125 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.592891 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.605444 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" event={"ID":"004c67d3-d7dc-49b4-853b-89753a6fc6b4","Type":"ContainerStarted","Data":"d6e7510e910d220d9ea43c4e3d5017e63b9cbacabbb018506fe7a52b50c704e3"} Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.605511 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.646447 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f6776c7f8-jxx4p" event={"ID":"d00c25a0-75c3-4eb7-b258-634d56bb62ff","Type":"ContainerStarted","Data":"3f84e2b66b3d563893cdbe9b917704322d42dfceadb1f8548caee15efe92a491"} Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.646490 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f6776c7f8-jxx4p" event={"ID":"d00c25a0-75c3-4eb7-b258-634d56bb62ff","Type":"ContainerStarted","Data":"2905c6ffce29f6f344dc88877e98236ed563463f15b7566cae5fc0113dce0cf6"} Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.647470 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.647498 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.651725 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" podStartSLOduration=2.651702908 podStartE2EDuration="2.651702908s" podCreationTimestamp="2026-01-22 05:37:02 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:04.645243575 +0000 UTC m=+1110.728731780" watchObservedRunningTime="2026-01-22 05:37:04.651702908 +0000 UTC m=+1110.735191123" Jan 22 05:37:04 crc kubenswrapper[4814]: I0122 05:37:04.683485 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-f6776c7f8-jxx4p" podStartSLOduration=2.683469305 podStartE2EDuration="2.683469305s" podCreationTimestamp="2026-01-22 05:37:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:04.680253684 +0000 UTC m=+1110.763741899" watchObservedRunningTime="2026-01-22 05:37:04.683469305 +0000 UTC m=+1110.766957520" Jan 22 05:37:05 crc kubenswrapper[4814]: I0122 05:37:05.193897 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-5hll9"] Jan 22 05:37:05 crc kubenswrapper[4814]: I0122 05:37:05.246502 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 05:37:05 crc kubenswrapper[4814]: I0122 05:37:05.395551 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 05:37:05 crc kubenswrapper[4814]: I0122 05:37:05.664868 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-5hll9" event={"ID":"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3","Type":"ContainerStarted","Data":"80c26bb38df97c736271b321344d52533134a706ddb1758285dedfbc683c6c2a"} Jan 22 05:37:05 crc kubenswrapper[4814]: I0122 05:37:05.666411 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"814cb7df-caa1-49f3-a26a-7aea04b643e8","Type":"ContainerStarted","Data":"11841e8777cd9a54f78584ceee81b62cedcd3148a824d785c77712bfa3e68f9f"} Jan 22 05:37:05 crc kubenswrapper[4814]: I0122 05:37:05.667456 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02168312-40cd-4a3a-966c-49e8347c8459","Type":"ContainerStarted","Data":"7416febfe3d1586cbc939ccbbb6d1431f5a43701f4eaa5a01281df11aeea38d9"} Jan 22 05:37:05 crc kubenswrapper[4814]: I0122 05:37:05.678387 4814 generic.go:334] "Generic (PLEG): container finished" podID="bd231434-0d02-4b13-9a72-c31277deeacf" containerID="9b7f46b5460c1ad190ffa4151b61b12d2b69ae28f3095f159aab9823219ef863" exitCode=0 Jan 22 05:37:05 crc kubenswrapper[4814]: I0122 05:37:05.678646 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd231434-0d02-4b13-9a72-c31277deeacf","Type":"ContainerDied","Data":"9b7f46b5460c1ad190ffa4151b61b12d2b69ae28f3095f159aab9823219ef863"} Jan 22 05:37:05 crc kubenswrapper[4814]: I0122 05:37:05.679232 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" podUID="004c67d3-d7dc-49b4-853b-89753a6fc6b4" containerName="dnsmasq-dns" containerID="cri-o://d6e7510e910d220d9ea43c4e3d5017e63b9cbacabbb018506fe7a52b50c704e3" gracePeriod=10 Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.359898 4814 util.go:48] "No ready sandbox for pod can be found. 
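Unlike the ceilometer-0 record earlier, the two "Observed pod startup duration" records above carry Go's zero time (0001-01-01 00:00:00 +0000 UTC) for firstStartedPulling and lastFinishedPulling: no image pull was observed, so podStartSLOduration equals podStartE2EDuration for both pods (2.65s and 2.68s). Scanning all such records in a capture (same assumptions):

    import re

    text = open("kubelet.log").read()
    pat = re.compile(
        r'"Observed pod startup duration" pod="([^"]+)" '
        r'podStartSLOduration=(\S+) podStartE2EDuration="([^"]+)"')
    for pod, slo, e2e in pat.findall(text):
        print(f"{pod}: SLO {slo}s, e2e {e2e}")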
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.435284 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd231434-0d02-4b13-9a72-c31277deeacf-run-httpd\") pod \"bd231434-0d02-4b13-9a72-c31277deeacf\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.435371 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd231434-0d02-4b13-9a72-c31277deeacf-log-httpd\") pod \"bd231434-0d02-4b13-9a72-c31277deeacf\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.435507 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-sg-core-conf-yaml\") pod \"bd231434-0d02-4b13-9a72-c31277deeacf\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.435557 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-config-data\") pod \"bd231434-0d02-4b13-9a72-c31277deeacf\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.435589 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-scripts\") pod \"bd231434-0d02-4b13-9a72-c31277deeacf\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.435606 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-combined-ca-bundle\") pod \"bd231434-0d02-4b13-9a72-c31277deeacf\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.435688 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22ff8\" (UniqueName: \"kubernetes.io/projected/bd231434-0d02-4b13-9a72-c31277deeacf-kube-api-access-22ff8\") pod \"bd231434-0d02-4b13-9a72-c31277deeacf\" (UID: \"bd231434-0d02-4b13-9a72-c31277deeacf\") " Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.436336 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd231434-0d02-4b13-9a72-c31277deeacf-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bd231434-0d02-4b13-9a72-c31277deeacf" (UID: "bd231434-0d02-4b13-9a72-c31277deeacf"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.436781 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd231434-0d02-4b13-9a72-c31277deeacf-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bd231434-0d02-4b13-9a72-c31277deeacf" (UID: "bd231434-0d02-4b13-9a72-c31277deeacf"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.440942 4814 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd231434-0d02-4b13-9a72-c31277deeacf-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.440978 4814 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd231434-0d02-4b13-9a72-c31277deeacf-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.452867 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-scripts" (OuterVolumeSpecName: "scripts") pod "bd231434-0d02-4b13-9a72-c31277deeacf" (UID: "bd231434-0d02-4b13-9a72-c31277deeacf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.453685 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd231434-0d02-4b13-9a72-c31277deeacf-kube-api-access-22ff8" (OuterVolumeSpecName: "kube-api-access-22ff8") pod "bd231434-0d02-4b13-9a72-c31277deeacf" (UID: "bd231434-0d02-4b13-9a72-c31277deeacf"). InnerVolumeSpecName "kube-api-access-22ff8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.545966 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.545994 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22ff8\" (UniqueName: \"kubernetes.io/projected/bd231434-0d02-4b13-9a72-c31277deeacf-kube-api-access-22ff8\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.579869 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bd231434-0d02-4b13-9a72-c31277deeacf" (UID: "bd231434-0d02-4b13-9a72-c31277deeacf"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.647884 4814 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.695199 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd231434-0d02-4b13-9a72-c31277deeacf" (UID: "bd231434-0d02-4b13-9a72-c31277deeacf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.711436 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-config-data" (OuterVolumeSpecName: "config-data") pod "bd231434-0d02-4b13-9a72-c31277deeacf" (UID: "bd231434-0d02-4b13-9a72-c31277deeacf"). InnerVolumeSpecName "config-data". 
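The records just below show the ceilometer-0 pod object being replaced outright: "SyncLoop DELETE", then "SyncLoop REMOVE", then "SyncLoop ADD" for the same pod name under a new UID, after which cpu_manager and memory_manager emit "RemoveStaleState: removing container" for each container of the old UID. Those lines are logged at error severity (E0122), but in this sequence they read as routine cleanup of per-container resource-manager state left behind by the deleted pod. Listing what gets cleaned up (same file-name assumption):

    import re

    text = open("kubelet.log").read()
    pat = re.compile(
        r'"RemoveStaleState: removing container" '
        r'podUID="([^"]+)" containerName="([^"]+)"')
    for uid, name in sorted(set(pat.findall(text))):
        print(uid, name)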
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.730916 4814 generic.go:334] "Generic (PLEG): container finished" podID="004c67d3-d7dc-49b4-853b-89753a6fc6b4" containerID="d6e7510e910d220d9ea43c4e3d5017e63b9cbacabbb018506fe7a52b50c704e3" exitCode=0 Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.730978 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" event={"ID":"004c67d3-d7dc-49b4-853b-89753a6fc6b4","Type":"ContainerDied","Data":"d6e7510e910d220d9ea43c4e3d5017e63b9cbacabbb018506fe7a52b50c704e3"} Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.735075 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.735128 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd231434-0d02-4b13-9a72-c31277deeacf","Type":"ContainerDied","Data":"d3cfb9f7029afb5c97fa0777215f7e30f0dc39f7aced1acd835af2b0af7dd763"} Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.735250 4814 scope.go:117] "RemoveContainer" containerID="55a3b93b021e125ddecae24816013e653a86650f9e7849939871766040bdfcdf" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.750014 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.750040 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd231434-0d02-4b13-9a72-c31277deeacf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.794667 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.816450 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.845472 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:06 crc kubenswrapper[4814]: E0122 05:37:06.845903 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="ceilometer-notification-agent" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.845918 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="ceilometer-notification-agent" Jan 22 05:37:06 crc kubenswrapper[4814]: E0122 05:37:06.845932 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="sg-core" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.845938 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="sg-core" Jan 22 05:37:06 crc kubenswrapper[4814]: E0122 05:37:06.845948 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="ceilometer-central-agent" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.845954 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="ceilometer-central-agent" Jan 22 05:37:06 crc kubenswrapper[4814]: E0122 05:37:06.845971 4814 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="proxy-httpd" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.845978 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="proxy-httpd" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.846136 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="ceilometer-central-agent" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.846153 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="sg-core" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.846162 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="proxy-httpd" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.846170 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" containerName="ceilometer-notification-agent" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.848284 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.858913 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.859096 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.879659 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.955796 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxh47\" (UniqueName: \"kubernetes.io/projected/2a029b9f-d28b-41af-b400-15506591e866-kube-api-access-nxh47\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.955859 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-scripts\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.955900 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a029b9f-d28b-41af-b400-15506591e866-run-httpd\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.955931 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.955968 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-config-data\") pod \"ceilometer-0\" 
(UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.955990 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a029b9f-d28b-41af-b400-15506591e866-log-httpd\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:06 crc kubenswrapper[4814]: I0122 05:37:06.956025 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:06.968849 4814 scope.go:117] "RemoveContainer" containerID="78f20c18a7f1881091b952a9a8094d8739980170df3e12e508de878662b54369" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.057979 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-config-data\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.058021 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a029b9f-d28b-41af-b400-15506591e866-log-httpd\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.058062 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.058112 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxh47\" (UniqueName: \"kubernetes.io/projected/2a029b9f-d28b-41af-b400-15506591e866-kube-api-access-nxh47\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.058131 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-scripts\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.058162 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a029b9f-d28b-41af-b400-15506591e866-run-httpd\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.058193 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.062153 4814 
scope.go:117] "RemoveContainer" containerID="9b7f46b5460c1ad190ffa4151b61b12d2b69ae28f3095f159aab9823219ef863" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.062509 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a029b9f-d28b-41af-b400-15506591e866-log-httpd\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.063128 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a029b9f-d28b-41af-b400-15506591e866-run-httpd\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.066784 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.070194 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-config-data\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.070743 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.071580 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-scripts\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.085240 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxh47\" (UniqueName: \"kubernetes.io/projected/2a029b9f-d28b-41af-b400-15506591e866-kube-api-access-nxh47\") pod \"ceilometer-0\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.115812 4814 util.go:48] "No ready sandbox for pod can be found. 
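[Editor's note] The block above shows the kubelet's volume reconciler walking each ceilometer-0 volume through its three phases: VerifyControllerAttachedVolume started, MountVolume started, MountVolume.SetUp succeeded. Each journal line wraps a klog record: severity letter plus date (I0122), time, PID, source file:line, then the message. A minimal Go sketch for splitting those fields out of records like the ones above; the regexp and field layout are my own reading of the format, not anything kubelet ships:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Matches journald-wrapped klog records such as:
//   Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.062153 4814 scope.go:117] "RemoveContainer" ...
// Group 1: severity (I/W/E), 2: klog timestamp, 3: source file:line, 4: message.
var klogRe = regexp.MustCompile(
	`kubenswrapper\[\d+\]: ([IWE])(\d{4} \d{2}:\d{2}:\d{2}\.\d{6})\s+\d+ ([\w./-]+:\d+)\] (.*)$`)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1024*1024), 1024*1024) // some records are very long
	for sc.Scan() {
		if m := klogRe.FindStringSubmatch(sc.Text()); m != nil {
			fmt.Printf("%s %s %-28s %.80s\n", m[1], m[2], m[3], m[4])
		}
	}
}
```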
Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.158915 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-sb\") pod \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.159014 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-dns-swift-storage-0\") pod \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.159054 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-config\") pod \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.159101 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-dns-svc\") pod \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.159140 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-nb\") pod \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.159180 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qk4sl\" (UniqueName: \"kubernetes.io/projected/004c67d3-d7dc-49b4-853b-89753a6fc6b4-kube-api-access-qk4sl\") pod \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.168955 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/004c67d3-d7dc-49b4-853b-89753a6fc6b4-kube-api-access-qk4sl" (OuterVolumeSpecName: "kube-api-access-qk4sl") pod "004c67d3-d7dc-49b4-853b-89753a6fc6b4" (UID: "004c67d3-d7dc-49b4-853b-89753a6fc6b4"). InnerVolumeSpecName "kube-api-access-qk4sl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.183778 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.206558 4814 scope.go:117] "RemoveContainer" containerID="daa591d018eaab0582fdc764b67e1553c318f27f3d77a1cfa5bb0e21425c3007" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.248046 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.263137 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qk4sl\" (UniqueName: \"kubernetes.io/projected/004c67d3-d7dc-49b4-853b-89753a6fc6b4-kube-api-access-qk4sl\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.340499 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.390312 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "004c67d3-d7dc-49b4-853b-89753a6fc6b4" (UID: "004c67d3-d7dc-49b4-853b-89753a6fc6b4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.401430 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "004c67d3-d7dc-49b4-853b-89753a6fc6b4" (UID: "004c67d3-d7dc-49b4-853b-89753a6fc6b4"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.456005 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-config" (OuterVolumeSpecName: "config") pod "004c67d3-d7dc-49b4-853b-89753a6fc6b4" (UID: "004c67d3-d7dc-49b4-853b-89753a6fc6b4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.459071 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "004c67d3-d7dc-49b4-853b-89753a6fc6b4" (UID: "004c67d3-d7dc-49b4-853b-89753a6fc6b4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.497571 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "004c67d3-d7dc-49b4-853b-89753a6fc6b4" (UID: "004c67d3-d7dc-49b4-853b-89753a6fc6b4"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.497733 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-nb\") pod \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\" (UID: \"004c67d3-d7dc-49b4-853b-89753a6fc6b4\") " Jan 22 05:37:07 crc kubenswrapper[4814]: W0122 05:37:07.498375 4814 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/004c67d3-d7dc-49b4-853b-89753a6fc6b4/volumes/kubernetes.io~configmap/ovsdbserver-nb Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.498411 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "004c67d3-d7dc-49b4-853b-89753a6fc6b4" (UID: "004c67d3-d7dc-49b4-853b-89753a6fc6b4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.498733 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.498750 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.498759 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.498768 4814 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.498778 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/004c67d3-d7dc-49b4-853b-89753a6fc6b4-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.624535 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6cc4dbcdb9-79wm4"] Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.625424 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6cc4dbcdb9-79wm4" podUID="c4bdc5ec-1c01-4278-b941-ec748d494a8c" containerName="neutron-api" containerID="cri-o://b8bcc01cfe1c622a59e9044973a57b66974a5ee04cc631bf5ec60dd023dcce96" gracePeriod=30 Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.626168 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6cc4dbcdb9-79wm4" podUID="c4bdc5ec-1c01-4278-b941-ec748d494a8c" containerName="neutron-httpd" containerID="cri-o://7227166f51a6ea452cf3e6ed6b122ec93a84b70b70b63faf04397ddc39650cad" gracePeriod=30 Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.689871 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-6cc4dbcdb9-79wm4" podUID="c4bdc5ec-1c01-4278-b941-ec748d494a8c" containerName="neutron-httpd" probeResult="failure" 
output="Get \"https://10.217.0.158:9696/\": EOF" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.747747 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-58687b7457-jl86n"] Jan 22 05:37:07 crc kubenswrapper[4814]: E0122 05:37:07.748280 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="004c67d3-d7dc-49b4-853b-89753a6fc6b4" containerName="init" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.748291 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="004c67d3-d7dc-49b4-853b-89753a6fc6b4" containerName="init" Jan 22 05:37:07 crc kubenswrapper[4814]: E0122 05:37:07.748338 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="004c67d3-d7dc-49b4-853b-89753a6fc6b4" containerName="dnsmasq-dns" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.748345 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="004c67d3-d7dc-49b4-853b-89753a6fc6b4" containerName="dnsmasq-dns" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.748654 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="004c67d3-d7dc-49b4-853b-89753a6fc6b4" containerName="dnsmasq-dns" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.750002 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.758241 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-58687b7457-jl86n"] Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.802183 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" event={"ID":"004c67d3-d7dc-49b4-853b-89753a6fc6b4","Type":"ContainerDied","Data":"d84888575455f9be25ee5d9160753b932756c7218e67796ea5f0a325f6a3676b"} Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.802231 4814 scope.go:117] "RemoveContainer" containerID="d6e7510e910d220d9ea43c4e3d5017e63b9cbacabbb018506fe7a52b50c704e3" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.802363 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-9xnx9" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.834096 4814 generic.go:334] "Generic (PLEG): container finished" podID="b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" containerID="1f81f760a1a9e2fb90d74c94ea2f9cea2741a54d732c79c3e752fd2046cdb624" exitCode=0 Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.834151 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-5hll9" event={"ID":"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3","Type":"ContainerDied","Data":"1f81f760a1a9e2fb90d74c94ea2f9cea2741a54d732c79c3e752fd2046cdb624"} Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.844658 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2pqn\" (UniqueName: \"kubernetes.io/projected/722c584f-63e7-4817-b5f3-14915fbfe930-kube-api-access-m2pqn\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.844699 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-public-tls-certs\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.844749 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-combined-ca-bundle\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.844788 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-httpd-config\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.844813 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-internal-tls-certs\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.844827 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-ovndb-tls-certs\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.844849 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-config\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.857144 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" event={"ID":"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824","Type":"ContainerStarted","Data":"ee089ac9a6a528b059b37e639e6884cb657481ee034afef32dbe104806fe0bda"} Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.857333 4814 scope.go:117] "RemoveContainer" containerID="e0bfe579314b8fc7b9d41603be27e4ef6dc68828bc0d8f2681d8553713ae295f" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.865662 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-9xnx9"] Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.882110 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-9xnx9"] Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.906258 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-85674cd5c5-g47hl" event={"ID":"e32c5346-8a9c-41d1-9521-b6253cdc2250","Type":"ContainerStarted","Data":"1d514702c751dc2585dc1aca558175cfc2522288be175c2d6aa180c0b93d86e4"} Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.945978 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2pqn\" (UniqueName: \"kubernetes.io/projected/722c584f-63e7-4817-b5f3-14915fbfe930-kube-api-access-m2pqn\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.946023 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-public-tls-certs\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.946093 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-combined-ca-bundle\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.946158 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-httpd-config\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.946200 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-internal-tls-certs\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.946218 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-ovndb-tls-certs\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.946242 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-config\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.963269 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-httpd-config\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.964493 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-internal-tls-certs\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.965238 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2pqn\" (UniqueName: \"kubernetes.io/projected/722c584f-63e7-4817-b5f3-14915fbfe930-kube-api-access-m2pqn\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.965399 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-config\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.970043 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-public-tls-certs\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.997911 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-combined-ca-bundle\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:07 crc kubenswrapper[4814]: I0122 05:37:07.998783 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-ovndb-tls-certs\") pod \"neutron-58687b7457-jl86n\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:08 crc kubenswrapper[4814]: I0122 05:37:08.064051 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:08 crc kubenswrapper[4814]: I0122 05:37:08.118250 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:08 crc kubenswrapper[4814]: I0122 05:37:08.410830 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="004c67d3-d7dc-49b4-853b-89753a6fc6b4" path="/var/lib/kubelet/pods/004c67d3-d7dc-49b4-853b-89753a6fc6b4/volumes" Jan 22 05:37:08 crc kubenswrapper[4814]: I0122 05:37:08.421302 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd231434-0d02-4b13-9a72-c31277deeacf" path="/var/lib/kubelet/pods/bd231434-0d02-4b13-9a72-c31277deeacf/volumes" Jan 22 05:37:08 crc kubenswrapper[4814]: I0122 05:37:08.934032 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02168312-40cd-4a3a-966c-49e8347c8459","Type":"ContainerStarted","Data":"7eb9e732f24af4380b07ac7b0338602687d4c83dee6e7ba24957279d1463bd6e"} Jan 22 05:37:08 crc kubenswrapper[4814]: I0122 05:37:08.939719 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" event={"ID":"ff4e9f9d-e8de-40b0-8ae1-723fd6eb7824","Type":"ContainerStarted","Data":"ce8d5afdd491809b4b7caffb0a8ce499cd4077292a88856c4137e2d66137f305"} Jan 22 05:37:08 crc kubenswrapper[4814]: I0122 05:37:08.947522 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-85674cd5c5-g47hl" event={"ID":"e32c5346-8a9c-41d1-9521-b6253cdc2250","Type":"ContainerStarted","Data":"c1c959d6f8d627c6ad50123c8ca073c7877f96b9767da64fdb2d02f518aa1ba7"} Jan 22 05:37:08 crc kubenswrapper[4814]: I0122 05:37:08.955225 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-cfcbfc454-bzmww" podStartSLOduration=3.813508856 podStartE2EDuration="6.955213343s" podCreationTimestamp="2026-01-22 05:37:02 +0000 UTC" firstStartedPulling="2026-01-22 05:37:03.197173991 +0000 UTC m=+1109.280662206" lastFinishedPulling="2026-01-22 05:37:06.338878488 +0000 UTC m=+1112.422366693" observedRunningTime="2026-01-22 05:37:08.953691876 +0000 UTC m=+1115.037180091" watchObservedRunningTime="2026-01-22 05:37:08.955213343 +0000 UTC m=+1115.038701558" Jan 22 05:37:08 crc kubenswrapper[4814]: I0122 05:37:08.980224 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a029b9f-d28b-41af-b400-15506591e866","Type":"ContainerStarted","Data":"11d51cfbdd02dd3db836f0ae60cc8ae82938e1a01893c1394f2db5675bd30bba"} Jan 22 05:37:08 crc kubenswrapper[4814]: I0122 05:37:08.982292 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-58687b7457-jl86n"] Jan 22 05:37:08 crc kubenswrapper[4814]: I0122 05:37:08.993438 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-85674cd5c5-g47hl" podStartSLOduration=4.662188587 podStartE2EDuration="7.993417623s" podCreationTimestamp="2026-01-22 05:37:01 +0000 UTC" firstStartedPulling="2026-01-22 05:37:03.019719271 +0000 UTC m=+1109.103207486" lastFinishedPulling="2026-01-22 05:37:06.350948307 +0000 UTC m=+1112.434436522" observedRunningTime="2026-01-22 05:37:08.982265853 +0000 UTC m=+1115.065754068" watchObservedRunningTime="2026-01-22 05:37:08.993417623 +0000 UTC m=+1115.076905838" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.015348 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-5hll9" 
event={"ID":"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3","Type":"ContainerStarted","Data":"78351aff5ec8b2eeca612e47d857edcbe95c2b889447a2e6d4317c59e9ccdfc1"} Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.015494 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.018007 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"814cb7df-caa1-49f3-a26a-7aea04b643e8","Type":"ContainerStarted","Data":"009bb59520e0be6831e77037620b76c77872439380d6335873339423a398b574"} Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.040201 4814 generic.go:334] "Generic (PLEG): container finished" podID="c4bdc5ec-1c01-4278-b941-ec748d494a8c" containerID="7227166f51a6ea452cf3e6ed6b122ec93a84b70b70b63faf04397ddc39650cad" exitCode=0 Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.040251 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6cc4dbcdb9-79wm4" event={"ID":"c4bdc5ec-1c01-4278-b941-ec748d494a8c","Type":"ContainerDied","Data":"7227166f51a6ea452cf3e6ed6b122ec93a84b70b70b63faf04397ddc39650cad"} Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.046569 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5784cf869f-5hll9" podStartSLOduration=6.04654914 podStartE2EDuration="6.04654914s" podCreationTimestamp="2026-01-22 05:37:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:09.037130194 +0000 UTC m=+1115.120618399" watchObservedRunningTime="2026-01-22 05:37:09.04654914 +0000 UTC m=+1115.130037355" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.556548 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6f58c7c874-mfrnc"] Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.559351 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.571971 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6f58c7c874-mfrnc"] Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.576148 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.576394 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.689805 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aaf18f71-35b0-4b6f-8047-efd59ab92b85-logs\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.690020 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-combined-ca-bundle\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.690221 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9n5g8\" (UniqueName: \"kubernetes.io/projected/aaf18f71-35b0-4b6f-8047-efd59ab92b85-kube-api-access-9n5g8\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.690333 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-config-data\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.690426 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-config-data-custom\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.690535 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-public-tls-certs\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.690611 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-internal-tls-certs\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.792775 4814 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-public-tls-certs\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.793045 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-internal-tls-certs\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.793126 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aaf18f71-35b0-4b6f-8047-efd59ab92b85-logs\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.793144 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-combined-ca-bundle\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.793180 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9n5g8\" (UniqueName: \"kubernetes.io/projected/aaf18f71-35b0-4b6f-8047-efd59ab92b85-kube-api-access-9n5g8\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.793213 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-config-data\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.793232 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-config-data-custom\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.794238 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aaf18f71-35b0-4b6f-8047-efd59ab92b85-logs\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.800179 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-internal-tls-certs\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.802030 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-combined-ca-bundle\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.811778 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-public-tls-certs\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.816115 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9n5g8\" (UniqueName: \"kubernetes.io/projected/aaf18f71-35b0-4b6f-8047-efd59ab92b85-kube-api-access-9n5g8\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.826882 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-config-data\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.828387 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/aaf18f71-35b0-4b6f-8047-efd59ab92b85-config-data-custom\") pod \"barbican-api-6f58c7c874-mfrnc\" (UID: \"aaf18f71-35b0-4b6f-8047-efd59ab92b85\") " pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.933638 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-6cc4dbcdb9-79wm4" podUID="c4bdc5ec-1c01-4278-b941-ec748d494a8c" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.158:9696/\": dial tcp 10.217.0.158:9696: connect: connection refused" Jan 22 05:37:09 crc kubenswrapper[4814]: I0122 05:37:09.935013 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:10 crc kubenswrapper[4814]: I0122 05:37:10.077198 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a029b9f-d28b-41af-b400-15506591e866","Type":"ContainerStarted","Data":"ca744b5c40e9c00e28756235b6a42b73fa5ba575ca0c1827955d1411ef7d479b"} Jan 22 05:37:10 crc kubenswrapper[4814]: I0122 05:37:10.107979 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"814cb7df-caa1-49f3-a26a-7aea04b643e8","Type":"ContainerStarted","Data":"75a7c7e8f31dcb8dfbd2521495ef733d4f7376bac2f0ac0b0e7c09cf62550f7f"} Jan 22 05:37:10 crc kubenswrapper[4814]: I0122 05:37:10.109014 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 22 05:37:10 crc kubenswrapper[4814]: I0122 05:37:10.108620 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="814cb7df-caa1-49f3-a26a-7aea04b643e8" containerName="cinder-api-log" containerID="cri-o://009bb59520e0be6831e77037620b76c77872439380d6335873339423a398b574" gracePeriod=30 Jan 22 05:37:10 crc kubenswrapper[4814]: I0122 05:37:10.109105 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="814cb7df-caa1-49f3-a26a-7aea04b643e8" containerName="cinder-api" containerID="cri-o://75a7c7e8f31dcb8dfbd2521495ef733d4f7376bac2f0ac0b0e7c09cf62550f7f" gracePeriod=30 Jan 22 05:37:10 crc kubenswrapper[4814]: I0122 05:37:10.149012 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.148991545 podStartE2EDuration="6.148991545s" podCreationTimestamp="2026-01-22 05:37:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:10.130881697 +0000 UTC m=+1116.214369912" watchObservedRunningTime="2026-01-22 05:37:10.148991545 +0000 UTC m=+1116.232479760" Jan 22 05:37:10 crc kubenswrapper[4814]: I0122 05:37:10.158061 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02168312-40cd-4a3a-966c-49e8347c8459","Type":"ContainerStarted","Data":"4651deb0cb00d00ee5b3ad4c6288d97849f4e83793a9a8e16ab8ddb2b903f0d0"} Jan 22 05:37:10 crc kubenswrapper[4814]: I0122 05:37:10.211781 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58687b7457-jl86n" event={"ID":"722c584f-63e7-4817-b5f3-14915fbfe930","Type":"ContainerStarted","Data":"7214d2a1f39761dae623f6a7f57f7eca2494819d5044c4b0b8161b52304916b9"} Jan 22 05:37:10 crc kubenswrapper[4814]: I0122 05:37:10.219704 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58687b7457-jl86n" event={"ID":"722c584f-63e7-4817-b5f3-14915fbfe930","Type":"ContainerStarted","Data":"8515a74d2b69e32d70f3b779024b35117577ecec86860430f0eb2a9d97394e86"} Jan 22 05:37:10 crc kubenswrapper[4814]: I0122 05:37:10.502063 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.9640009 podStartE2EDuration="7.502046688s" podCreationTimestamp="2026-01-22 05:37:03 +0000 UTC" firstStartedPulling="2026-01-22 05:37:05.584018113 +0000 UTC m=+1111.667506328" lastFinishedPulling="2026-01-22 05:37:07.122063901 +0000 UTC m=+1113.205552116" observedRunningTime="2026-01-22 05:37:10.208108711 +0000 UTC m=+1116.291596926" 
watchObservedRunningTime="2026-01-22 05:37:10.502046688 +0000 UTC m=+1116.585534893" Jan 22 05:37:10 crc kubenswrapper[4814]: I0122 05:37:10.509197 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6f58c7c874-mfrnc"] Jan 22 05:37:11 crc kubenswrapper[4814]: I0122 05:37:11.222530 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6f58c7c874-mfrnc" event={"ID":"aaf18f71-35b0-4b6f-8047-efd59ab92b85","Type":"ContainerStarted","Data":"35d1c64d139d3eaecbe2a78d326c7609ab5dde32a116a481e75741e7c450cb91"} Jan 22 05:37:11 crc kubenswrapper[4814]: I0122 05:37:11.223945 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6f58c7c874-mfrnc" event={"ID":"aaf18f71-35b0-4b6f-8047-efd59ab92b85","Type":"ContainerStarted","Data":"83c53bb8e9812f495ec5173d18de4c5a25c402de803d8865402354677fe307ad"} Jan 22 05:37:11 crc kubenswrapper[4814]: I0122 05:37:11.225798 4814 generic.go:334] "Generic (PLEG): container finished" podID="814cb7df-caa1-49f3-a26a-7aea04b643e8" containerID="009bb59520e0be6831e77037620b76c77872439380d6335873339423a398b574" exitCode=143 Jan 22 05:37:11 crc kubenswrapper[4814]: I0122 05:37:11.225919 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"814cb7df-caa1-49f3-a26a-7aea04b643e8","Type":"ContainerDied","Data":"009bb59520e0be6831e77037620b76c77872439380d6335873339423a398b574"} Jan 22 05:37:11 crc kubenswrapper[4814]: I0122 05:37:11.229681 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58687b7457-jl86n" event={"ID":"722c584f-63e7-4817-b5f3-14915fbfe930","Type":"ContainerStarted","Data":"703cc6d1048b43a347cc8099f5d47bb1aebdc40d5295fbc80d9678894ecc3558"} Jan 22 05:37:11 crc kubenswrapper[4814]: I0122 05:37:11.229772 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:11 crc kubenswrapper[4814]: I0122 05:37:11.248765 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-58687b7457-jl86n" podStartSLOduration=4.248751026 podStartE2EDuration="4.248751026s" podCreationTimestamp="2026-01-22 05:37:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:11.244916726 +0000 UTC m=+1117.328404941" watchObservedRunningTime="2026-01-22 05:37:11.248751026 +0000 UTC m=+1117.332239241" Jan 22 05:37:12 crc kubenswrapper[4814]: I0122 05:37:12.240857 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a029b9f-d28b-41af-b400-15506591e866","Type":"ContainerStarted","Data":"c9b104bef1e03ac41be6e83aa258814be1cf2cfa2fbcae5ebc32a04ffc9073c9"} Jan 22 05:37:13 crc kubenswrapper[4814]: I0122 05:37:13.251611 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6f58c7c874-mfrnc" event={"ID":"aaf18f71-35b0-4b6f-8047-efd59ab92b85","Type":"ContainerStarted","Data":"fff2945c033e9f562e45a3f771f7e37fa43289eda9ab3ad974339ec9b13b0c26"} Jan 22 05:37:13 crc kubenswrapper[4814]: I0122 05:37:13.251680 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:13 crc kubenswrapper[4814]: I0122 05:37:13.251698 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.280178 4814 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a029b9f-d28b-41af-b400-15506591e866","Type":"ContainerStarted","Data":"0b862f8c6bac865d13a32133d13d7f93dac606214863cfa8db354bbea0c98d0c"} Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.288279 4814 generic.go:334] "Generic (PLEG): container finished" podID="c4bdc5ec-1c01-4278-b941-ec748d494a8c" containerID="b8bcc01cfe1c622a59e9044973a57b66974a5ee04cc631bf5ec60dd023dcce96" exitCode=0 Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.289126 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6cc4dbcdb9-79wm4" event={"ID":"c4bdc5ec-1c01-4278-b941-ec748d494a8c","Type":"ContainerDied","Data":"b8bcc01cfe1c622a59e9044973a57b66974a5ee04cc631bf5ec60dd023dcce96"} Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.289154 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6cc4dbcdb9-79wm4" event={"ID":"c4bdc5ec-1c01-4278-b941-ec748d494a8c","Type":"ContainerDied","Data":"76c4e1335b5364de31bae35146d1c73e825e0123c9fec6f948d8177dfd2e47fc"} Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.289165 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76c4e1335b5364de31bae35146d1c73e825e0123c9fec6f948d8177dfd2e47fc" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.335238 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.371060 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6f58c7c874-mfrnc" podStartSLOduration=5.371035813 podStartE2EDuration="5.371035813s" podCreationTimestamp="2026-01-22 05:37:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:13.276991302 +0000 UTC m=+1119.360479517" watchObservedRunningTime="2026-01-22 05:37:14.371035813 +0000 UTC m=+1120.454524038" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.387701 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.403607 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-config\") pod \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.404043 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-internal-tls-certs\") pod \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.404078 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-ovndb-tls-certs\") pod \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.404106 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-httpd-config\") pod 
\"c4bdc5ec-1c01-4278-b941-ec748d494a8c\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.404174 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-combined-ca-bundle\") pod \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.404207 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-public-tls-certs\") pod \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.404227 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fbw96\" (UniqueName: \"kubernetes.io/projected/c4bdc5ec-1c01-4278-b941-ec748d494a8c-kube-api-access-fbw96\") pod \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\" (UID: \"c4bdc5ec-1c01-4278-b941-ec748d494a8c\") " Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.418992 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "c4bdc5ec-1c01-4278-b941-ec748d494a8c" (UID: "c4bdc5ec-1c01-4278-b941-ec748d494a8c"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.431314 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4bdc5ec-1c01-4278-b941-ec748d494a8c-kube-api-access-fbw96" (OuterVolumeSpecName: "kube-api-access-fbw96") pod "c4bdc5ec-1c01-4278-b941-ec748d494a8c" (UID: "c4bdc5ec-1c01-4278-b941-ec748d494a8c"). InnerVolumeSpecName "kube-api-access-fbw96". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.478490 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.508561 4814 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.508592 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fbw96\" (UniqueName: \"kubernetes.io/projected/c4bdc5ec-1c01-4278-b941-ec748d494a8c-kube-api-access-fbw96\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.543930 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "c4bdc5ec-1c01-4278-b941-ec748d494a8c" (UID: "c4bdc5ec-1c01-4278-b941-ec748d494a8c"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.549216 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c4bdc5ec-1c01-4278-b941-ec748d494a8c" (UID: "c4bdc5ec-1c01-4278-b941-ec748d494a8c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.606495 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-config" (OuterVolumeSpecName: "config") pod "c4bdc5ec-1c01-4278-b941-ec748d494a8c" (UID: "c4bdc5ec-1c01-4278-b941-ec748d494a8c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.607263 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-7tgnz"] Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.607491 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" podUID="7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" containerName="dnsmasq-dns" containerID="cri-o://e752660ef0289f877b7d6589f5422f4e73f6cfcf662c2d8dc639399613ba2f32" gracePeriod=10 Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.610581 4814 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.610602 4814 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.610613 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.616753 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c4bdc5ec-1c01-4278-b941-ec748d494a8c" (UID: "c4bdc5ec-1c01-4278-b941-ec748d494a8c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.668758 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c4bdc5ec-1c01-4278-b941-ec748d494a8c" (UID: "c4bdc5ec-1c01-4278-b941-ec748d494a8c"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.711776 4814 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:14 crc kubenswrapper[4814]: I0122 05:37:14.711809 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4bdc5ec-1c01-4278-b941-ec748d494a8c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.213115 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.311926 4814 generic.go:334] "Generic (PLEG): container finished" podID="7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" containerID="e752660ef0289f877b7d6589f5422f4e73f6cfcf662c2d8dc639399613ba2f32" exitCode=0 Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.313054 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" event={"ID":"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795","Type":"ContainerDied","Data":"e752660ef0289f877b7d6589f5422f4e73f6cfcf662c2d8dc639399613ba2f32"} Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.313437 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6cc4dbcdb9-79wm4" Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.379765 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.391687 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6cc4dbcdb9-79wm4"] Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.400730 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-6cc4dbcdb9-79wm4"] Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.664247 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.759877 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.793635 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.947751 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-dns-swift-storage-0\") pod \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.947926 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-config\") pod \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.948075 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-dns-svc\") pod \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.948198 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-ovsdbserver-sb\") pod \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.954291 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-ovsdbserver-nb\") pod \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.954481 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpq7j\" (UniqueName: \"kubernetes.io/projected/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-kube-api-access-fpq7j\") pod \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\" (UID: \"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795\") " Jan 22 05:37:15 crc kubenswrapper[4814]: I0122 05:37:15.969178 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-kube-api-access-fpq7j" (OuterVolumeSpecName: "kube-api-access-fpq7j") pod "7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" (UID: "7f8c8fc7-eaa8-4231-99b2-26c1fa17c795"). InnerVolumeSpecName "kube-api-access-fpq7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.044576 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" (UID: "7f8c8fc7-eaa8-4231-99b2-26c1fa17c795"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.056981 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.057012 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpq7j\" (UniqueName: \"kubernetes.io/projected/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-kube-api-access-fpq7j\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.081452 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" (UID: "7f8c8fc7-eaa8-4231-99b2-26c1fa17c795"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.087368 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" (UID: "7f8c8fc7-eaa8-4231-99b2-26c1fa17c795"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.089571 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-config" (OuterVolumeSpecName: "config") pod "7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" (UID: "7f8c8fc7-eaa8-4231-99b2-26c1fa17c795"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.118446 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" (UID: "7f8c8fc7-eaa8-4231-99b2-26c1fa17c795"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.158570 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.158603 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.158613 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.158622 4814 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.320727 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a029b9f-d28b-41af-b400-15506591e866","Type":"ContainerStarted","Data":"bee72efb9bdc072949d09f555869d9fe9b71a6c67fa86c3e7cee6c0091fc716b"} Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.321803 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.324542 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="02168312-40cd-4a3a-966c-49e8347c8459" containerName="cinder-scheduler" containerID="cri-o://7eb9e732f24af4380b07ac7b0338602687d4c83dee6e7ba24957279d1463bd6e" gracePeriod=30 Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.324673 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.325862 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-7tgnz" event={"ID":"7f8c8fc7-eaa8-4231-99b2-26c1fa17c795","Type":"ContainerDied","Data":"1fd04e98fcd47bd0ef2e1d7ad295d07682bec4608630a91c390e0cb7d0a4e1dd"} Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.325933 4814 scope.go:117] "RemoveContainer" containerID="e752660ef0289f877b7d6589f5422f4e73f6cfcf662c2d8dc639399613ba2f32" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.327159 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="02168312-40cd-4a3a-966c-49e8347c8459" containerName="probe" containerID="cri-o://4651deb0cb00d00ee5b3ad4c6288d97849f4e83793a9a8e16ab8ddb2b903f0d0" gracePeriod=30 Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.358787 4814 scope.go:117] "RemoveContainer" containerID="5007a78765effcbe8ead2fc11c91c0dc0cc6788cc657ee1a710ceac01b23050c" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.383642 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4bdc5ec-1c01-4278-b941-ec748d494a8c" path="/var/lib/kubelet/pods/c4bdc5ec-1c01-4278-b941-ec748d494a8c/volumes" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.403950 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.742318261 podStartE2EDuration="10.403929485s" podCreationTimestamp="2026-01-22 05:37:06 +0000 UTC" firstStartedPulling="2026-01-22 05:37:08.099811593 +0000 UTC m=+1114.183299808" lastFinishedPulling="2026-01-22 05:37:15.761422817 +0000 UTC m=+1121.844911032" observedRunningTime="2026-01-22 05:37:16.351647653 +0000 UTC m=+1122.435135868" watchObservedRunningTime="2026-01-22 05:37:16.403929485 +0000 UTC m=+1122.487417700" Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.414602 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-7tgnz"] Jan 22 05:37:16 crc kubenswrapper[4814]: I0122 05:37:16.432439 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-7tgnz"] Jan 22 05:37:17 crc kubenswrapper[4814]: I0122 05:37:17.897248 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:37:17 crc kubenswrapper[4814]: I0122 05:37:17.902699 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:17 crc kubenswrapper[4814]: I0122 05:37:17.973276 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.356979 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" path="/var/lib/kubelet/pods/7f8c8fc7-eaa8-4231-99b2-26c1fa17c795/volumes" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.357846 4814 generic.go:334] "Generic (PLEG): container finished" podID="02168312-40cd-4a3a-966c-49e8347c8459" containerID="4651deb0cb00d00ee5b3ad4c6288d97849f4e83793a9a8e16ab8ddb2b903f0d0" exitCode=0 Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.357864 4814 generic.go:334] "Generic (PLEG): container finished" podID="02168312-40cd-4a3a-966c-49e8347c8459" 
containerID="7eb9e732f24af4380b07ac7b0338602687d4c83dee6e7ba24957279d1463bd6e" exitCode=0 Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.358471 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02168312-40cd-4a3a-966c-49e8347c8459","Type":"ContainerDied","Data":"4651deb0cb00d00ee5b3ad4c6288d97849f4e83793a9a8e16ab8ddb2b903f0d0"} Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.358497 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02168312-40cd-4a3a-966c-49e8347c8459","Type":"ContainerDied","Data":"7eb9e732f24af4380b07ac7b0338602687d4c83dee6e7ba24957279d1463bd6e"} Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.358508 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"02168312-40cd-4a3a-966c-49e8347c8459","Type":"ContainerDied","Data":"7416febfe3d1586cbc939ccbbb6d1431f5a43701f4eaa5a01281df11aeea38d9"} Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.358516 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7416febfe3d1586cbc939ccbbb6d1431f5a43701f4eaa5a01281df11aeea38d9" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.391089 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.501402 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02168312-40cd-4a3a-966c-49e8347c8459-etc-machine-id\") pod \"02168312-40cd-4a3a-966c-49e8347c8459\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.501473 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wm92g\" (UniqueName: \"kubernetes.io/projected/02168312-40cd-4a3a-966c-49e8347c8459-kube-api-access-wm92g\") pod \"02168312-40cd-4a3a-966c-49e8347c8459\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.501541 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/02168312-40cd-4a3a-966c-49e8347c8459-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "02168312-40cd-4a3a-966c-49e8347c8459" (UID: "02168312-40cd-4a3a-966c-49e8347c8459"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.501614 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-config-data\") pod \"02168312-40cd-4a3a-966c-49e8347c8459\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.501693 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-combined-ca-bundle\") pod \"02168312-40cd-4a3a-966c-49e8347c8459\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.501722 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-scripts\") pod \"02168312-40cd-4a3a-966c-49e8347c8459\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.501751 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-config-data-custom\") pod \"02168312-40cd-4a3a-966c-49e8347c8459\" (UID: \"02168312-40cd-4a3a-966c-49e8347c8459\") " Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.502157 4814 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02168312-40cd-4a3a-966c-49e8347c8459-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.509002 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-scripts" (OuterVolumeSpecName: "scripts") pod "02168312-40cd-4a3a-966c-49e8347c8459" (UID: "02168312-40cd-4a3a-966c-49e8347c8459"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.532316 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02168312-40cd-4a3a-966c-49e8347c8459-kube-api-access-wm92g" (OuterVolumeSpecName: "kube-api-access-wm92g") pod "02168312-40cd-4a3a-966c-49e8347c8459" (UID: "02168312-40cd-4a3a-966c-49e8347c8459"). InnerVolumeSpecName "kube-api-access-wm92g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.534800 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "02168312-40cd-4a3a-966c-49e8347c8459" (UID: "02168312-40cd-4a3a-966c-49e8347c8459"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.566917 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "02168312-40cd-4a3a-966c-49e8347c8459" (UID: "02168312-40cd-4a3a-966c-49e8347c8459"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.603618 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.603697 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.603707 4814 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.603715 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wm92g\" (UniqueName: \"kubernetes.io/projected/02168312-40cd-4a3a-966c-49e8347c8459-kube-api-access-wm92g\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.632768 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-config-data" (OuterVolumeSpecName: "config-data") pod "02168312-40cd-4a3a-966c-49e8347c8459" (UID: "02168312-40cd-4a3a-966c-49e8347c8459"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:18 crc kubenswrapper[4814]: I0122 05:37:18.705141 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02168312-40cd-4a3a-966c-49e8347c8459-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.365123 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.373688 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.422670 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.439612 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.446445 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 05:37:19 crc kubenswrapper[4814]: E0122 05:37:19.446848 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" containerName="dnsmasq-dns" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.446866 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" containerName="dnsmasq-dns" Jan 22 05:37:19 crc kubenswrapper[4814]: E0122 05:37:19.446879 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" containerName="init" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.446886 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" containerName="init" Jan 22 05:37:19 crc kubenswrapper[4814]: E0122 05:37:19.446907 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02168312-40cd-4a3a-966c-49e8347c8459" containerName="cinder-scheduler" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.446913 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="02168312-40cd-4a3a-966c-49e8347c8459" containerName="cinder-scheduler" Jan 22 05:37:19 crc kubenswrapper[4814]: E0122 05:37:19.446924 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4bdc5ec-1c01-4278-b941-ec748d494a8c" containerName="neutron-api" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.446931 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4bdc5ec-1c01-4278-b941-ec748d494a8c" containerName="neutron-api" Jan 22 05:37:19 crc kubenswrapper[4814]: E0122 05:37:19.446963 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02168312-40cd-4a3a-966c-49e8347c8459" containerName="probe" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.446971 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="02168312-40cd-4a3a-966c-49e8347c8459" containerName="probe" Jan 22 05:37:19 crc kubenswrapper[4814]: E0122 05:37:19.446980 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4bdc5ec-1c01-4278-b941-ec748d494a8c" containerName="neutron-httpd" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.446986 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4bdc5ec-1c01-4278-b941-ec748d494a8c" containerName="neutron-httpd" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.447129 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4bdc5ec-1c01-4278-b941-ec748d494a8c" containerName="neutron-api" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.447143 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f8c8fc7-eaa8-4231-99b2-26c1fa17c795" containerName="dnsmasq-dns" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.447152 4814 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="c4bdc5ec-1c01-4278-b941-ec748d494a8c" containerName="neutron-httpd" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.447174 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="02168312-40cd-4a3a-966c-49e8347c8459" containerName="probe" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.447190 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="02168312-40cd-4a3a-966c-49e8347c8459" containerName="cinder-scheduler" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.448047 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.454841 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.486785 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.522256 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.522347 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.522371 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-config-data\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.522431 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npbwv\" (UniqueName: \"kubernetes.io/projected/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-kube-api-access-npbwv\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.522466 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.522507 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-scripts\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.615109 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.615166 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.624346 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.624386 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-config-data\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.624449 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npbwv\" (UniqueName: \"kubernetes.io/projected/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-kube-api-access-npbwv\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.624486 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.624532 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-scripts\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.624579 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.624765 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.633273 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.637553 4814 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-config-data\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.659985 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npbwv\" (UniqueName: \"kubernetes.io/projected/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-kube-api-access-npbwv\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.669177 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.669829 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/03db3a2d-721f-44fd-9fd1-6b3ead1eaeab-scripts\") pod \"cinder-scheduler-0\" (UID: \"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab\") " pod="openstack/cinder-scheduler-0" Jan 22 05:37:19 crc kubenswrapper[4814]: I0122 05:37:19.786855 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 22 05:37:20 crc kubenswrapper[4814]: I0122 05:37:20.352695 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02168312-40cd-4a3a-966c-49e8347c8459" path="/var/lib/kubelet/pods/02168312-40cd-4a3a-966c-49e8347c8459/volumes" Jan 22 05:37:20 crc kubenswrapper[4814]: I0122 05:37:20.365687 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 22 05:37:20 crc kubenswrapper[4814]: I0122 05:37:20.384623 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6f58c7c874-mfrnc" Jan 22 05:37:20 crc kubenswrapper[4814]: I0122 05:37:20.464128 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-f6776c7f8-jxx4p"] Jan 22 05:37:20 crc kubenswrapper[4814]: I0122 05:37:20.464452 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-f6776c7f8-jxx4p" podUID="d00c25a0-75c3-4eb7-b258-634d56bb62ff" containerName="barbican-api-log" containerID="cri-o://2905c6ffce29f6f344dc88877e98236ed563463f15b7566cae5fc0113dce0cf6" gracePeriod=30 Jan 22 05:37:20 crc kubenswrapper[4814]: I0122 05:37:20.464849 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-f6776c7f8-jxx4p" podUID="d00c25a0-75c3-4eb7-b258-634d56bb62ff" containerName="barbican-api" containerID="cri-o://3f84e2b66b3d563893cdbe9b917704322d42dfceadb1f8548caee15efe92a491" gracePeriod=30 Jan 22 05:37:21 crc kubenswrapper[4814]: I0122 05:37:21.402231 4814 generic.go:334] "Generic (PLEG): container finished" podID="d00c25a0-75c3-4eb7-b258-634d56bb62ff" containerID="2905c6ffce29f6f344dc88877e98236ed563463f15b7566cae5fc0113dce0cf6" exitCode=143 Jan 22 05:37:21 crc kubenswrapper[4814]: I0122 05:37:21.402495 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f6776c7f8-jxx4p" event={"ID":"d00c25a0-75c3-4eb7-b258-634d56bb62ff","Type":"ContainerDied","Data":"2905c6ffce29f6f344dc88877e98236ed563463f15b7566cae5fc0113dce0cf6"} Jan 22 
05:37:21 crc kubenswrapper[4814]: I0122 05:37:21.407880 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab","Type":"ContainerStarted","Data":"a972d21570923ddfc55ab23dac73cf2857f7b2d4361eea443d997c166fb03bce"} Jan 22 05:37:21 crc kubenswrapper[4814]: I0122 05:37:21.407923 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab","Type":"ContainerStarted","Data":"962515ae68a1bd83a2eceebe2d0544861b731ba3c521d3da567e92f00cb1e69f"} Jan 22 05:37:21 crc kubenswrapper[4814]: I0122 05:37:21.419467 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-75cf549f68-bs2gm" Jan 22 05:37:21 crc kubenswrapper[4814]: I0122 05:37:21.478465 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fc797bd5d-f6wlm"] Jan 22 05:37:21 crc kubenswrapper[4814]: I0122 05:37:21.478700 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5fc797bd5d-f6wlm" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon-log" containerID="cri-o://7f23a639b1ada62582270bf6799256509c66f1bf11e5ba263f91e0db7ea739ff" gracePeriod=30 Jan 22 05:37:21 crc kubenswrapper[4814]: I0122 05:37:21.478824 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5fc797bd5d-f6wlm" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon" containerID="cri-o://9b9f397b2ef25ac63c69976a1ea84fba0814b89575854dd1327004c4934916e1" gracePeriod=30 Jan 22 05:37:21 crc kubenswrapper[4814]: I0122 05:37:21.497862 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5fc797bd5d-f6wlm" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": EOF" Jan 22 05:37:22 crc kubenswrapper[4814]: I0122 05:37:22.423538 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"03db3a2d-721f-44fd-9fd1-6b3ead1eaeab","Type":"ContainerStarted","Data":"cbe63efaf6b4915d715839c7a1f1c86586772d834370ef0818cd2780133f5aa1"} Jan 22 05:37:22 crc kubenswrapper[4814]: I0122 05:37:22.440476 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.440460029 podStartE2EDuration="3.440460029s" podCreationTimestamp="2026-01-22 05:37:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:22.439654703 +0000 UTC m=+1128.523142918" watchObservedRunningTime="2026-01-22 05:37:22.440460029 +0000 UTC m=+1128.523948244" Jan 22 05:37:22 crc kubenswrapper[4814]: I0122 05:37:22.481947 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:37:22 crc kubenswrapper[4814]: I0122 05:37:22.777525 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-b696dd656-jb7qd" Jan 22 05:37:23 crc kubenswrapper[4814]: I0122 05:37:23.930524 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-f6776c7f8-jxx4p" podUID="d00c25a0-75c3-4eb7-b258-634d56bb62ff" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.164:9311/healthcheck\": read tcp 
10.217.0.2:58048->10.217.0.164:9311: read: connection reset by peer" Jan 22 05:37:23 crc kubenswrapper[4814]: I0122 05:37:23.930621 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-f6776c7f8-jxx4p" podUID="d00c25a0-75c3-4eb7-b258-634d56bb62ff" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.164:9311/healthcheck\": read tcp 10.217.0.2:58040->10.217.0.164:9311: read: connection reset by peer" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.064683 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7775c58c77-kjdcl" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.434020 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.438659 4814 generic.go:334] "Generic (PLEG): container finished" podID="d00c25a0-75c3-4eb7-b258-634d56bb62ff" containerID="3f84e2b66b3d563893cdbe9b917704322d42dfceadb1f8548caee15efe92a491" exitCode=0 Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.438952 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f6776c7f8-jxx4p" event={"ID":"d00c25a0-75c3-4eb7-b258-634d56bb62ff","Type":"ContainerDied","Data":"3f84e2b66b3d563893cdbe9b917704322d42dfceadb1f8548caee15efe92a491"} Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.439099 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f6776c7f8-jxx4p" event={"ID":"d00c25a0-75c3-4eb7-b258-634d56bb62ff","Type":"ContainerDied","Data":"5b599705cb17f5b38aa279df8a192fe4fc676e9a0738df324e7db3cf2045cde6"} Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.439183 4814 scope.go:117] "RemoveContainer" containerID="3f84e2b66b3d563893cdbe9b917704322d42dfceadb1f8548caee15efe92a491" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.439386 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-f6776c7f8-jxx4p" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.469482 4814 scope.go:117] "RemoveContainer" containerID="2905c6ffce29f6f344dc88877e98236ed563463f15b7566cae5fc0113dce0cf6" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.504674 4814 scope.go:117] "RemoveContainer" containerID="3f84e2b66b3d563893cdbe9b917704322d42dfceadb1f8548caee15efe92a491" Jan 22 05:37:24 crc kubenswrapper[4814]: E0122 05:37:24.505091 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f84e2b66b3d563893cdbe9b917704322d42dfceadb1f8548caee15efe92a491\": container with ID starting with 3f84e2b66b3d563893cdbe9b917704322d42dfceadb1f8548caee15efe92a491 not found: ID does not exist" containerID="3f84e2b66b3d563893cdbe9b917704322d42dfceadb1f8548caee15efe92a491" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.505201 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f84e2b66b3d563893cdbe9b917704322d42dfceadb1f8548caee15efe92a491"} err="failed to get container status \"3f84e2b66b3d563893cdbe9b917704322d42dfceadb1f8548caee15efe92a491\": rpc error: code = NotFound desc = could not find container \"3f84e2b66b3d563893cdbe9b917704322d42dfceadb1f8548caee15efe92a491\": container with ID starting with 3f84e2b66b3d563893cdbe9b917704322d42dfceadb1f8548caee15efe92a491 not found: ID does not exist" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.505307 4814 scope.go:117] "RemoveContainer" containerID="2905c6ffce29f6f344dc88877e98236ed563463f15b7566cae5fc0113dce0cf6" Jan 22 05:37:24 crc kubenswrapper[4814]: E0122 05:37:24.505681 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2905c6ffce29f6f344dc88877e98236ed563463f15b7566cae5fc0113dce0cf6\": container with ID starting with 2905c6ffce29f6f344dc88877e98236ed563463f15b7566cae5fc0113dce0cf6 not found: ID does not exist" containerID="2905c6ffce29f6f344dc88877e98236ed563463f15b7566cae5fc0113dce0cf6" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.505703 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2905c6ffce29f6f344dc88877e98236ed563463f15b7566cae5fc0113dce0cf6"} err="failed to get container status \"2905c6ffce29f6f344dc88877e98236ed563463f15b7566cae5fc0113dce0cf6\": rpc error: code = NotFound desc = could not find container \"2905c6ffce29f6f344dc88877e98236ed563463f15b7566cae5fc0113dce0cf6\": container with ID starting with 2905c6ffce29f6f344dc88877e98236ed563463f15b7566cae5fc0113dce0cf6 not found: ID does not exist" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.554738 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-config-data-custom\") pod \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.555837 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-config-data\") pod \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.556052 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-combined-ca-bundle\") pod \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.556151 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gx67f\" (UniqueName: \"kubernetes.io/projected/d00c25a0-75c3-4eb7-b258-634d56bb62ff-kube-api-access-gx67f\") pod \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.556263 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d00c25a0-75c3-4eb7-b258-634d56bb62ff-logs\") pod \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\" (UID: \"d00c25a0-75c3-4eb7-b258-634d56bb62ff\") " Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.557117 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d00c25a0-75c3-4eb7-b258-634d56bb62ff-logs" (OuterVolumeSpecName: "logs") pod "d00c25a0-75c3-4eb7-b258-634d56bb62ff" (UID: "d00c25a0-75c3-4eb7-b258-634d56bb62ff"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.564747 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d00c25a0-75c3-4eb7-b258-634d56bb62ff-kube-api-access-gx67f" (OuterVolumeSpecName: "kube-api-access-gx67f") pod "d00c25a0-75c3-4eb7-b258-634d56bb62ff" (UID: "d00c25a0-75c3-4eb7-b258-634d56bb62ff"). InnerVolumeSpecName "kube-api-access-gx67f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.579735 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d00c25a0-75c3-4eb7-b258-634d56bb62ff" (UID: "d00c25a0-75c3-4eb7-b258-634d56bb62ff"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.588701 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d00c25a0-75c3-4eb7-b258-634d56bb62ff" (UID: "d00c25a0-75c3-4eb7-b258-634d56bb62ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.615732 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-config-data" (OuterVolumeSpecName: "config-data") pod "d00c25a0-75c3-4eb7-b258-634d56bb62ff" (UID: "d00c25a0-75c3-4eb7-b258-634d56bb62ff"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.657820 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d00c25a0-75c3-4eb7-b258-634d56bb62ff-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.657850 4814 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.657860 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.657869 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d00c25a0-75c3-4eb7-b258-634d56bb62ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.657878 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gx67f\" (UniqueName: \"kubernetes.io/projected/d00c25a0-75c3-4eb7-b258-634d56bb62ff-kube-api-access-gx67f\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.772851 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-f6776c7f8-jxx4p"] Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.782699 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-f6776c7f8-jxx4p"] Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.789716 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.933029 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5fc797bd5d-f6wlm" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:35318->10.217.0.151:8443: read: connection reset by peer" Jan 22 05:37:24 crc kubenswrapper[4814]: I0122 05:37:24.933775 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5fc797bd5d-f6wlm" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.076142 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 22 05:37:25 crc kubenswrapper[4814]: E0122 05:37:25.076481 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d00c25a0-75c3-4eb7-b258-634d56bb62ff" containerName="barbican-api-log" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.076498 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d00c25a0-75c3-4eb7-b258-634d56bb62ff" containerName="barbican-api-log" Jan 22 05:37:25 crc kubenswrapper[4814]: E0122 05:37:25.076522 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d00c25a0-75c3-4eb7-b258-634d56bb62ff" containerName="barbican-api" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.076528 4814 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="d00c25a0-75c3-4eb7-b258-634d56bb62ff" containerName="barbican-api" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.076687 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="d00c25a0-75c3-4eb7-b258-634d56bb62ff" containerName="barbican-api" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.076717 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="d00c25a0-75c3-4eb7-b258-634d56bb62ff" containerName="barbican-api-log" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.077268 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.079408 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.080060 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-dfk5f" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.080434 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.131335 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.166789 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d5ce9e4c-7e05-4880-8548-98a02abaf05e-openstack-config\") pod \"openstackclient\" (UID: \"d5ce9e4c-7e05-4880-8548-98a02abaf05e\") " pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.166895 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5ce9e4c-7e05-4880-8548-98a02abaf05e-combined-ca-bundle\") pod \"openstackclient\" (UID: \"d5ce9e4c-7e05-4880-8548-98a02abaf05e\") " pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.166921 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wsbg\" (UniqueName: \"kubernetes.io/projected/d5ce9e4c-7e05-4880-8548-98a02abaf05e-kube-api-access-8wsbg\") pod \"openstackclient\" (UID: \"d5ce9e4c-7e05-4880-8548-98a02abaf05e\") " pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.166944 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d5ce9e4c-7e05-4880-8548-98a02abaf05e-openstack-config-secret\") pod \"openstackclient\" (UID: \"d5ce9e4c-7e05-4880-8548-98a02abaf05e\") " pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.268477 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d5ce9e4c-7e05-4880-8548-98a02abaf05e-openstack-config-secret\") pod \"openstackclient\" (UID: \"d5ce9e4c-7e05-4880-8548-98a02abaf05e\") " pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.268573 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d5ce9e4c-7e05-4880-8548-98a02abaf05e-openstack-config\") pod 
\"openstackclient\" (UID: \"d5ce9e4c-7e05-4880-8548-98a02abaf05e\") " pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.268663 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5ce9e4c-7e05-4880-8548-98a02abaf05e-combined-ca-bundle\") pod \"openstackclient\" (UID: \"d5ce9e4c-7e05-4880-8548-98a02abaf05e\") " pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.268685 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wsbg\" (UniqueName: \"kubernetes.io/projected/d5ce9e4c-7e05-4880-8548-98a02abaf05e-kube-api-access-8wsbg\") pod \"openstackclient\" (UID: \"d5ce9e4c-7e05-4880-8548-98a02abaf05e\") " pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.270201 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d5ce9e4c-7e05-4880-8548-98a02abaf05e-openstack-config\") pod \"openstackclient\" (UID: \"d5ce9e4c-7e05-4880-8548-98a02abaf05e\") " pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.275023 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d5ce9e4c-7e05-4880-8548-98a02abaf05e-openstack-config-secret\") pod \"openstackclient\" (UID: \"d5ce9e4c-7e05-4880-8548-98a02abaf05e\") " pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.278233 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5ce9e4c-7e05-4880-8548-98a02abaf05e-combined-ca-bundle\") pod \"openstackclient\" (UID: \"d5ce9e4c-7e05-4880-8548-98a02abaf05e\") " pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.292730 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wsbg\" (UniqueName: \"kubernetes.io/projected/d5ce9e4c-7e05-4880-8548-98a02abaf05e-kube-api-access-8wsbg\") pod \"openstackclient\" (UID: \"d5ce9e4c-7e05-4880-8548-98a02abaf05e\") " pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.440044 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.463514 4814 generic.go:334] "Generic (PLEG): container finished" podID="50923695-9bcc-49c5-844f-6275c99729e2" containerID="9b9f397b2ef25ac63c69976a1ea84fba0814b89575854dd1327004c4934916e1" exitCode=0 Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.463545 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fc797bd5d-f6wlm" event={"ID":"50923695-9bcc-49c5-844f-6275c99729e2","Type":"ContainerDied","Data":"9b9f397b2ef25ac63c69976a1ea84fba0814b89575854dd1327004c4934916e1"} Jan 22 05:37:25 crc kubenswrapper[4814]: I0122 05:37:25.970110 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 22 05:37:26 crc kubenswrapper[4814]: W0122 05:37:26.005345 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd5ce9e4c_7e05_4880_8548_98a02abaf05e.slice/crio-89eb35f485b770640e5dbb47267951d14c02d6541f1629c3315b5cd26ee789b4 WatchSource:0}: Error finding container 89eb35f485b770640e5dbb47267951d14c02d6541f1629c3315b5cd26ee789b4: Status 404 returned error can't find the container with id 89eb35f485b770640e5dbb47267951d14c02d6541f1629c3315b5cd26ee789b4 Jan 22 05:37:26 crc kubenswrapper[4814]: I0122 05:37:26.360468 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d00c25a0-75c3-4eb7-b258-634d56bb62ff" path="/var/lib/kubelet/pods/d00c25a0-75c3-4eb7-b258-634d56bb62ff/volumes" Jan 22 05:37:26 crc kubenswrapper[4814]: I0122 05:37:26.474449 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"d5ce9e4c-7e05-4880-8548-98a02abaf05e","Type":"ContainerStarted","Data":"89eb35f485b770640e5dbb47267951d14c02d6541f1629c3315b5cd26ee789b4"} Jan 22 05:37:29 crc kubenswrapper[4814]: I0122 05:37:29.947525 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-xlbkn"] Jan 22 05:37:29 crc kubenswrapper[4814]: I0122 05:37:29.948968 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-xlbkn" Jan 22 05:37:29 crc kubenswrapper[4814]: I0122 05:37:29.963849 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-xlbkn"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.076258 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2h7v\" (UniqueName: \"kubernetes.io/projected/347d9325-3d74-4146-94fe-c469e83043c9-kube-api-access-z2h7v\") pod \"nova-api-db-create-xlbkn\" (UID: \"347d9325-3d74-4146-94fe-c469e83043c9\") " pod="openstack/nova-api-db-create-xlbkn" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.076603 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/347d9325-3d74-4146-94fe-c469e83043c9-operator-scripts\") pod \"nova-api-db-create-xlbkn\" (UID: \"347d9325-3d74-4146-94fe-c469e83043c9\") " pod="openstack/nova-api-db-create-xlbkn" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.149828 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-5bec-account-create-update-br287"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.150908 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-5bec-account-create-update-br287" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.152764 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.169019 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-5bec-account-create-update-br287"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.186742 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2h7v\" (UniqueName: \"kubernetes.io/projected/347d9325-3d74-4146-94fe-c469e83043c9-kube-api-access-z2h7v\") pod \"nova-api-db-create-xlbkn\" (UID: \"347d9325-3d74-4146-94fe-c469e83043c9\") " pod="openstack/nova-api-db-create-xlbkn" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.186831 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/347d9325-3d74-4146-94fe-c469e83043c9-operator-scripts\") pod \"nova-api-db-create-xlbkn\" (UID: \"347d9325-3d74-4146-94fe-c469e83043c9\") " pod="openstack/nova-api-db-create-xlbkn" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.187458 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/347d9325-3d74-4146-94fe-c469e83043c9-operator-scripts\") pod \"nova-api-db-create-xlbkn\" (UID: \"347d9325-3d74-4146-94fe-c469e83043c9\") " pod="openstack/nova-api-db-create-xlbkn" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.234659 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2h7v\" (UniqueName: \"kubernetes.io/projected/347d9325-3d74-4146-94fe-c469e83043c9-kube-api-access-z2h7v\") pod \"nova-api-db-create-xlbkn\" (UID: \"347d9325-3d74-4146-94fe-c469e83043c9\") " pod="openstack/nova-api-db-create-xlbkn" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.255560 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-7dddp"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.262854 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-7dddp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.270062 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.278720 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-xlbkn" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.293345 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hq2f\" (UniqueName: \"kubernetes.io/projected/9b993bf4-b353-4ca1-a01c-cfbae095a030-kube-api-access-7hq2f\") pod \"nova-api-5bec-account-create-update-br287\" (UID: \"9b993bf4-b353-4ca1-a01c-cfbae095a030\") " pod="openstack/nova-api-5bec-account-create-update-br287" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.293733 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b993bf4-b353-4ca1-a01c-cfbae095a030-operator-scripts\") pod \"nova-api-5bec-account-create-update-br287\" (UID: \"9b993bf4-b353-4ca1-a01c-cfbae095a030\") " pod="openstack/nova-api-5bec-account-create-update-br287" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.333063 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-7dddp"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.394878 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b993bf4-b353-4ca1-a01c-cfbae095a030-operator-scripts\") pod \"nova-api-5bec-account-create-update-br287\" (UID: \"9b993bf4-b353-4ca1-a01c-cfbae095a030\") " pod="openstack/nova-api-5bec-account-create-update-br287" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.394951 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67b4bdd2-9667-497f-a85c-25d2c479e713-operator-scripts\") pod \"nova-cell0-db-create-7dddp\" (UID: \"67b4bdd2-9667-497f-a85c-25d2c479e713\") " pod="openstack/nova-cell0-db-create-7dddp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.394980 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hq2f\" (UniqueName: \"kubernetes.io/projected/9b993bf4-b353-4ca1-a01c-cfbae095a030-kube-api-access-7hq2f\") pod \"nova-api-5bec-account-create-update-br287\" (UID: \"9b993bf4-b353-4ca1-a01c-cfbae095a030\") " pod="openstack/nova-api-5bec-account-create-update-br287" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.395039 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl6kj\" (UniqueName: \"kubernetes.io/projected/67b4bdd2-9667-497f-a85c-25d2c479e713-kube-api-access-gl6kj\") pod \"nova-cell0-db-create-7dddp\" (UID: \"67b4bdd2-9667-497f-a85c-25d2c479e713\") " pod="openstack/nova-cell0-db-create-7dddp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.396022 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b993bf4-b353-4ca1-a01c-cfbae095a030-operator-scripts\") pod \"nova-api-5bec-account-create-update-br287\" (UID: \"9b993bf4-b353-4ca1-a01c-cfbae095a030\") " pod="openstack/nova-api-5bec-account-create-update-br287" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.400695 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-tcp2s"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.401900 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-tcp2s" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.410541 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-tcp2s"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.427877 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-e0c7-account-create-update-gr2s7"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.429165 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-e0c7-account-create-update-gr2s7"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.429303 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.438661 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.440143 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hq2f\" (UniqueName: \"kubernetes.io/projected/9b993bf4-b353-4ca1-a01c-cfbae095a030-kube-api-access-7hq2f\") pod \"nova-api-5bec-account-create-update-br287\" (UID: \"9b993bf4-b353-4ca1-a01c-cfbae095a030\") " pod="openstack/nova-api-5bec-account-create-update-br287" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.486727 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-5bec-account-create-update-br287" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.496904 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjpgd\" (UniqueName: \"kubernetes.io/projected/fee64fd2-2b4c-4b2c-9041-590c282c2e5b-kube-api-access-hjpgd\") pod \"nova-cell1-db-create-tcp2s\" (UID: \"fee64fd2-2b4c-4b2c-9041-590c282c2e5b\") " pod="openstack/nova-cell1-db-create-tcp2s" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.496946 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b09adfa-27dd-431e-a1ad-4ddd7f308c8e-operator-scripts\") pod \"nova-cell0-e0c7-account-create-update-gr2s7\" (UID: \"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e\") " pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.496972 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smkdj\" (UniqueName: \"kubernetes.io/projected/5b09adfa-27dd-431e-a1ad-4ddd7f308c8e-kube-api-access-smkdj\") pod \"nova-cell0-e0c7-account-create-update-gr2s7\" (UID: \"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e\") " pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.497166 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67b4bdd2-9667-497f-a85c-25d2c479e713-operator-scripts\") pod \"nova-cell0-db-create-7dddp\" (UID: \"67b4bdd2-9667-497f-a85c-25d2c479e713\") " pod="openstack/nova-cell0-db-create-7dddp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.497380 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fee64fd2-2b4c-4b2c-9041-590c282c2e5b-operator-scripts\") pod 
\"nova-cell1-db-create-tcp2s\" (UID: \"fee64fd2-2b4c-4b2c-9041-590c282c2e5b\") " pod="openstack/nova-cell1-db-create-tcp2s" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.497464 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl6kj\" (UniqueName: \"kubernetes.io/projected/67b4bdd2-9667-497f-a85c-25d2c479e713-kube-api-access-gl6kj\") pod \"nova-cell0-db-create-7dddp\" (UID: \"67b4bdd2-9667-497f-a85c-25d2c479e713\") " pod="openstack/nova-cell0-db-create-7dddp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.497833 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67b4bdd2-9667-497f-a85c-25d2c479e713-operator-scripts\") pod \"nova-cell0-db-create-7dddp\" (UID: \"67b4bdd2-9667-497f-a85c-25d2c479e713\") " pod="openstack/nova-cell0-db-create-7dddp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.528188 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl6kj\" (UniqueName: \"kubernetes.io/projected/67b4bdd2-9667-497f-a85c-25d2c479e713-kube-api-access-gl6kj\") pod \"nova-cell0-db-create-7dddp\" (UID: \"67b4bdd2-9667-497f-a85c-25d2c479e713\") " pod="openstack/nova-cell0-db-create-7dddp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.549062 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-a091-account-create-update-pkqsv"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.555541 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-a091-account-create-update-pkqsv" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.557789 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.561075 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-a091-account-create-update-pkqsv"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.599213 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fee64fd2-2b4c-4b2c-9041-590c282c2e5b-operator-scripts\") pod \"nova-cell1-db-create-tcp2s\" (UID: \"fee64fd2-2b4c-4b2c-9041-590c282c2e5b\") " pod="openstack/nova-cell1-db-create-tcp2s" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.599323 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjpgd\" (UniqueName: \"kubernetes.io/projected/fee64fd2-2b4c-4b2c-9041-590c282c2e5b-kube-api-access-hjpgd\") pod \"nova-cell1-db-create-tcp2s\" (UID: \"fee64fd2-2b4c-4b2c-9041-590c282c2e5b\") " pod="openstack/nova-cell1-db-create-tcp2s" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.599349 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b09adfa-27dd-431e-a1ad-4ddd7f308c8e-operator-scripts\") pod \"nova-cell0-e0c7-account-create-update-gr2s7\" (UID: \"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e\") " pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.599370 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smkdj\" (UniqueName: \"kubernetes.io/projected/5b09adfa-27dd-431e-a1ad-4ddd7f308c8e-kube-api-access-smkdj\") pod \"nova-cell0-e0c7-account-create-update-gr2s7\" (UID: 
\"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e\") " pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.600326 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b09adfa-27dd-431e-a1ad-4ddd7f308c8e-operator-scripts\") pod \"nova-cell0-e0c7-account-create-update-gr2s7\" (UID: \"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e\") " pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.600539 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fee64fd2-2b4c-4b2c-9041-590c282c2e5b-operator-scripts\") pod \"nova-cell1-db-create-tcp2s\" (UID: \"fee64fd2-2b4c-4b2c-9041-590c282c2e5b\") " pod="openstack/nova-cell1-db-create-tcp2s" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.614982 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smkdj\" (UniqueName: \"kubernetes.io/projected/5b09adfa-27dd-431e-a1ad-4ddd7f308c8e-kube-api-access-smkdj\") pod \"nova-cell0-e0c7-account-create-update-gr2s7\" (UID: \"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e\") " pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.616696 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjpgd\" (UniqueName: \"kubernetes.io/projected/fee64fd2-2b4c-4b2c-9041-590c282c2e5b-kube-api-access-hjpgd\") pod \"nova-cell1-db-create-tcp2s\" (UID: \"fee64fd2-2b4c-4b2c-9041-590c282c2e5b\") " pod="openstack/nova-cell1-db-create-tcp2s" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.627720 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-7dddp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.666390 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.667918 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="ceilometer-central-agent" containerID="cri-o://ca744b5c40e9c00e28756235b6a42b73fa5ba575ca0c1827955d1411ef7d479b" gracePeriod=30 Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.668419 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="ceilometer-notification-agent" containerID="cri-o://c9b104bef1e03ac41be6e83aa258814be1cf2cfa2fbcae5ebc32a04ffc9073c9" gracePeriod=30 Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.668492 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="sg-core" containerID="cri-o://0b862f8c6bac865d13a32133d13d7f93dac606214863cfa8db354bbea0c98d0c" gracePeriod=30 Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.668116 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="proxy-httpd" containerID="cri-o://bee72efb9bdc072949d09f555869d9fe9b71a6c67fa86c3e7cee6c0091fc716b" gracePeriod=30 Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.685025 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.168:3000/\": EOF" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.700680 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vh8v9\" (UniqueName: \"kubernetes.io/projected/8975f4f8-ca9b-483c-9627-266538c2036f-kube-api-access-vh8v9\") pod \"nova-cell1-a091-account-create-update-pkqsv\" (UID: \"8975f4f8-ca9b-483c-9627-266538c2036f\") " pod="openstack/nova-cell1-a091-account-create-update-pkqsv" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.700777 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8975f4f8-ca9b-483c-9627-266538c2036f-operator-scripts\") pod \"nova-cell1-a091-account-create-update-pkqsv\" (UID: \"8975f4f8-ca9b-483c-9627-266538c2036f\") " pod="openstack/nova-cell1-a091-account-create-update-pkqsv" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.751743 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-6c6c4b85d7-qfbkp"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.753363 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.756306 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.757143 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.763127 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6c6c4b85d7-qfbkp"] Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.765452 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.773538 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-tcp2s" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.783924 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.802376 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vh8v9\" (UniqueName: \"kubernetes.io/projected/8975f4f8-ca9b-483c-9627-266538c2036f-kube-api-access-vh8v9\") pod \"nova-cell1-a091-account-create-update-pkqsv\" (UID: \"8975f4f8-ca9b-483c-9627-266538c2036f\") " pod="openstack/nova-cell1-a091-account-create-update-pkqsv" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.802478 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8975f4f8-ca9b-483c-9627-266538c2036f-operator-scripts\") pod \"nova-cell1-a091-account-create-update-pkqsv\" (UID: \"8975f4f8-ca9b-483c-9627-266538c2036f\") " pod="openstack/nova-cell1-a091-account-create-update-pkqsv" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.803141 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8975f4f8-ca9b-483c-9627-266538c2036f-operator-scripts\") pod \"nova-cell1-a091-account-create-update-pkqsv\" (UID: \"8975f4f8-ca9b-483c-9627-266538c2036f\") " pod="openstack/nova-cell1-a091-account-create-update-pkqsv" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.829360 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vh8v9\" (UniqueName: \"kubernetes.io/projected/8975f4f8-ca9b-483c-9627-266538c2036f-kube-api-access-vh8v9\") pod \"nova-cell1-a091-account-create-update-pkqsv\" (UID: \"8975f4f8-ca9b-483c-9627-266538c2036f\") " pod="openstack/nova-cell1-a091-account-create-update-pkqsv" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.873488 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-a091-account-create-update-pkqsv" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.904175 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dclb9\" (UniqueName: \"kubernetes.io/projected/316037aa-fe14-4391-b010-8e0964a4758a-kube-api-access-dclb9\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.904294 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/316037aa-fe14-4391-b010-8e0964a4758a-internal-tls-certs\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.904358 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/316037aa-fe14-4391-b010-8e0964a4758a-public-tls-certs\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.904407 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/316037aa-fe14-4391-b010-8e0964a4758a-run-httpd\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.904460 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/316037aa-fe14-4391-b010-8e0964a4758a-log-httpd\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.904610 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/316037aa-fe14-4391-b010-8e0964a4758a-combined-ca-bundle\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.905065 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/316037aa-fe14-4391-b010-8e0964a4758a-config-data\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:30 crc kubenswrapper[4814]: I0122 05:37:30.905253 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/316037aa-fe14-4391-b010-8e0964a4758a-etc-swift\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.006583 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: 
\"kubernetes.io/projected/316037aa-fe14-4391-b010-8e0964a4758a-etc-swift\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.006658 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dclb9\" (UniqueName: \"kubernetes.io/projected/316037aa-fe14-4391-b010-8e0964a4758a-kube-api-access-dclb9\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.006678 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/316037aa-fe14-4391-b010-8e0964a4758a-internal-tls-certs\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.006697 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/316037aa-fe14-4391-b010-8e0964a4758a-public-tls-certs\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.006721 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/316037aa-fe14-4391-b010-8e0964a4758a-run-httpd\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.006737 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/316037aa-fe14-4391-b010-8e0964a4758a-log-httpd\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.006766 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/316037aa-fe14-4391-b010-8e0964a4758a-combined-ca-bundle\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.006836 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/316037aa-fe14-4391-b010-8e0964a4758a-config-data\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.007934 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/316037aa-fe14-4391-b010-8e0964a4758a-run-httpd\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.008566 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/316037aa-fe14-4391-b010-8e0964a4758a-log-httpd\") pod 
\"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.013406 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/316037aa-fe14-4391-b010-8e0964a4758a-combined-ca-bundle\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.013598 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/316037aa-fe14-4391-b010-8e0964a4758a-etc-swift\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.014688 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/316037aa-fe14-4391-b010-8e0964a4758a-config-data\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.014693 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/316037aa-fe14-4391-b010-8e0964a4758a-internal-tls-certs\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.019085 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/316037aa-fe14-4391-b010-8e0964a4758a-public-tls-certs\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.028863 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dclb9\" (UniqueName: \"kubernetes.io/projected/316037aa-fe14-4391-b010-8e0964a4758a-kube-api-access-dclb9\") pod \"swift-proxy-6c6c4b85d7-qfbkp\" (UID: \"316037aa-fe14-4391-b010-8e0964a4758a\") " pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.071009 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.521566 4814 generic.go:334] "Generic (PLEG): container finished" podID="2a029b9f-d28b-41af-b400-15506591e866" containerID="bee72efb9bdc072949d09f555869d9fe9b71a6c67fa86c3e7cee6c0091fc716b" exitCode=0 Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.521598 4814 generic.go:334] "Generic (PLEG): container finished" podID="2a029b9f-d28b-41af-b400-15506591e866" containerID="0b862f8c6bac865d13a32133d13d7f93dac606214863cfa8db354bbea0c98d0c" exitCode=2 Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.521606 4814 generic.go:334] "Generic (PLEG): container finished" podID="2a029b9f-d28b-41af-b400-15506591e866" containerID="ca744b5c40e9c00e28756235b6a42b73fa5ba575ca0c1827955d1411ef7d479b" exitCode=0 Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.521638 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a029b9f-d28b-41af-b400-15506591e866","Type":"ContainerDied","Data":"bee72efb9bdc072949d09f555869d9fe9b71a6c67fa86c3e7cee6c0091fc716b"} Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.521661 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a029b9f-d28b-41af-b400-15506591e866","Type":"ContainerDied","Data":"0b862f8c6bac865d13a32133d13d7f93dac606214863cfa8db354bbea0c98d0c"} Jan 22 05:37:31 crc kubenswrapper[4814]: I0122 05:37:31.521670 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a029b9f-d28b-41af-b400-15506591e866","Type":"ContainerDied","Data":"ca744b5c40e9c00e28756235b6a42b73fa5ba575ca0c1827955d1411ef7d479b"} Jan 22 05:37:33 crc kubenswrapper[4814]: I0122 05:37:33.100839 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5fc797bd5d-f6wlm" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Jan 22 05:37:33 crc kubenswrapper[4814]: I0122 05:37:33.548025 4814 generic.go:334] "Generic (PLEG): container finished" podID="2a029b9f-d28b-41af-b400-15506591e866" containerID="c9b104bef1e03ac41be6e83aa258814be1cf2cfa2fbcae5ebc32a04ffc9073c9" exitCode=0 Jan 22 05:37:33 crc kubenswrapper[4814]: I0122 05:37:33.548065 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a029b9f-d28b-41af-b400-15506591e866","Type":"ContainerDied","Data":"c9b104bef1e03ac41be6e83aa258814be1cf2cfa2fbcae5ebc32a04ffc9073c9"} Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.572435 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-78869465b8-8rvmm"] Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.581512 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.588088 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.588068 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-ffkm2" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.590618 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.615887 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-78869465b8-8rvmm"] Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.669066 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-config-data-custom\") pod \"heat-engine-78869465b8-8rvmm\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.669159 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-combined-ca-bundle\") pod \"heat-engine-78869465b8-8rvmm\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.669238 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-config-data\") pod \"heat-engine-78869465b8-8rvmm\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.669307 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-448m5\" (UniqueName: \"kubernetes.io/projected/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-kube-api-access-448m5\") pod \"heat-engine-78869465b8-8rvmm\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.710932 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-86d756df4f-476sb"] Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.730506 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.736108 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.769106 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-86d756df4f-476sb"] Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.771929 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-448m5\" (UniqueName: \"kubernetes.io/projected/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-kube-api-access-448m5\") pod \"heat-engine-78869465b8-8rvmm\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.772108 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-config-data-custom\") pod \"heat-engine-78869465b8-8rvmm\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.772165 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-combined-ca-bundle\") pod \"heat-engine-78869465b8-8rvmm\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.772223 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-config-data\") pod \"heat-engine-78869465b8-8rvmm\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.784224 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-config-data-custom\") pod \"heat-engine-78869465b8-8rvmm\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.785090 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-combined-ca-bundle\") pod \"heat-engine-78869465b8-8rvmm\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.817536 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-config-data\") pod \"heat-engine-78869465b8-8rvmm\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.822165 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-448m5\" (UniqueName: \"kubernetes.io/projected/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-kube-api-access-448m5\") pod \"heat-engine-78869465b8-8rvmm\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: 
I0122 05:37:34.869377 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-ffwm5"] Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.870865 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.883558 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-ffwm5"] Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.891656 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-config-data\") pod \"heat-cfnapi-86d756df4f-476sb\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.891777 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-combined-ca-bundle\") pod \"heat-cfnapi-86d756df4f-476sb\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.891859 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zvcg\" (UniqueName: \"kubernetes.io/projected/c33be205-b621-4a36-8cd8-2e30db89269c-kube-api-access-9zvcg\") pod \"heat-cfnapi-86d756df4f-476sb\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.891914 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-config-data-custom\") pod \"heat-cfnapi-86d756df4f-476sb\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.911078 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.975761 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5d54fb775f-hkkfd"] Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.977990 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:34 crc kubenswrapper[4814]: I0122 05:37:34.981592 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.007599 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-config\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.008840 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.009001 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-config-data-custom\") pod \"heat-cfnapi-86d756df4f-476sb\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.010440 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.010488 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cq9w\" (UniqueName: \"kubernetes.io/projected/c435e1dd-d906-4003-94cd-e78a57e0ab26-kube-api-access-7cq9w\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.010521 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-config-data\") pod \"heat-cfnapi-86d756df4f-476sb\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.010684 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-combined-ca-bundle\") pod \"heat-cfnapi-86d756df4f-476sb\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.010740 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.010900 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.010952 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zvcg\" (UniqueName: \"kubernetes.io/projected/c33be205-b621-4a36-8cd8-2e30db89269c-kube-api-access-9zvcg\") pod \"heat-cfnapi-86d756df4f-476sb\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.028444 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zvcg\" (UniqueName: \"kubernetes.io/projected/c33be205-b621-4a36-8cd8-2e30db89269c-kube-api-access-9zvcg\") pod \"heat-cfnapi-86d756df4f-476sb\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.049619 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-combined-ca-bundle\") pod \"heat-cfnapi-86d756df4f-476sb\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.059114 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5d54fb775f-hkkfd"] Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.072443 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-config-data-custom\") pod \"heat-cfnapi-86d756df4f-476sb\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.075448 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-config-data\") pod \"heat-cfnapi-86d756df4f-476sb\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.100069 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.112171 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-config\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.112214 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.112309 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-config-data-custom\") pod \"heat-api-5d54fb775f-hkkfd\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.112336 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfcjv\" (UniqueName: \"kubernetes.io/projected/4b566de2-fe37-4017-8360-0d6d3c2ce4be-kube-api-access-wfcjv\") pod \"heat-api-5d54fb775f-hkkfd\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.112358 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.112373 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cq9w\" (UniqueName: \"kubernetes.io/projected/c435e1dd-d906-4003-94cd-e78a57e0ab26-kube-api-access-7cq9w\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.112395 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-combined-ca-bundle\") pod \"heat-api-5d54fb775f-hkkfd\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.112411 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-config-data\") pod \"heat-api-5d54fb775f-hkkfd\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.112452 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: 
\"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.112491 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.113406 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.114699 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.114984 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.116330 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-config\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.118600 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.139824 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cq9w\" (UniqueName: \"kubernetes.io/projected/c435e1dd-d906-4003-94cd-e78a57e0ab26-kube-api-access-7cq9w\") pod \"dnsmasq-dns-f6bc4c6c9-ffwm5\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.214985 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-config-data-custom\") pod \"heat-api-5d54fb775f-hkkfd\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.215029 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfcjv\" (UniqueName: \"kubernetes.io/projected/4b566de2-fe37-4017-8360-0d6d3c2ce4be-kube-api-access-wfcjv\") pod \"heat-api-5d54fb775f-hkkfd\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:35 
crc kubenswrapper[4814]: I0122 05:37:35.215061 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-combined-ca-bundle\") pod \"heat-api-5d54fb775f-hkkfd\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.215077 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-config-data\") pod \"heat-api-5d54fb775f-hkkfd\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.219969 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.220731 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-config-data\") pod \"heat-api-5d54fb775f-hkkfd\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.222354 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-combined-ca-bundle\") pod \"heat-api-5d54fb775f-hkkfd\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.230550 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.235427 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfcjv\" (UniqueName: \"kubernetes.io/projected/4b566de2-fe37-4017-8360-0d6d3c2ce4be-kube-api-access-wfcjv\") pod \"heat-api-5d54fb775f-hkkfd\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.236272 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-config-data-custom\") pod \"heat-api-5d54fb775f-hkkfd\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:35 crc kubenswrapper[4814]: I0122 05:37:35.301390 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:37 crc kubenswrapper[4814]: I0122 05:37:37.186533 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.168:3000/\": dial tcp 10.217.0.168:3000: connect: connection refused" Jan 22 05:37:38 crc kubenswrapper[4814]: I0122 05:37:38.147900 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-58687b7457-jl86n" Jan 22 05:37:38 crc kubenswrapper[4814]: I0122 05:37:38.224167 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-787f7bb69b-kz5kq"] Jan 22 05:37:38 crc kubenswrapper[4814]: I0122 05:37:38.224385 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-787f7bb69b-kz5kq" podUID="a693759e-220d-4f38-ab6d-e4e21b91fefa" containerName="neutron-api" containerID="cri-o://5c62d3c7a1029bbde8e91169e8849a6b7b7312fc0e65d174a1704ba0946011d9" gracePeriod=30 Jan 22 05:37:38 crc kubenswrapper[4814]: I0122 05:37:38.224792 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-787f7bb69b-kz5kq" podUID="a693759e-220d-4f38-ab6d-e4e21b91fefa" containerName="neutron-httpd" containerID="cri-o://cf4ae4a88dfc1c5930bec18ff9596582fff0a4d31959829a71c3e9d08dc3a684" gracePeriod=30 Jan 22 05:37:38 crc kubenswrapper[4814]: I0122 05:37:38.660476 4814 generic.go:334] "Generic (PLEG): container finished" podID="a693759e-220d-4f38-ab6d-e4e21b91fefa" containerID="cf4ae4a88dfc1c5930bec18ff9596582fff0a4d31959829a71c3e9d08dc3a684" exitCode=0 Jan 22 05:37:38 crc kubenswrapper[4814]: I0122 05:37:38.660799 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-787f7bb69b-kz5kq" event={"ID":"a693759e-220d-4f38-ab6d-e4e21b91fefa","Type":"ContainerDied","Data":"cf4ae4a88dfc1c5930bec18ff9596582fff0a4d31959829a71c3e9d08dc3a684"} Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.089405 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.193151 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-scripts\") pod \"2a029b9f-d28b-41af-b400-15506591e866\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.195905 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-combined-ca-bundle\") pod \"2a029b9f-d28b-41af-b400-15506591e866\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.196327 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-config-data\") pod \"2a029b9f-d28b-41af-b400-15506591e866\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.196468 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a029b9f-d28b-41af-b400-15506591e866-log-httpd\") pod \"2a029b9f-d28b-41af-b400-15506591e866\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.199407 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxh47\" (UniqueName: \"kubernetes.io/projected/2a029b9f-d28b-41af-b400-15506591e866-kube-api-access-nxh47\") pod \"2a029b9f-d28b-41af-b400-15506591e866\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.199542 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a029b9f-d28b-41af-b400-15506591e866-run-httpd\") pod \"2a029b9f-d28b-41af-b400-15506591e866\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.199651 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-sg-core-conf-yaml\") pod \"2a029b9f-d28b-41af-b400-15506591e866\" (UID: \"2a029b9f-d28b-41af-b400-15506591e866\") " Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.206099 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a029b9f-d28b-41af-b400-15506591e866-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2a029b9f-d28b-41af-b400-15506591e866" (UID: "2a029b9f-d28b-41af-b400-15506591e866"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.206363 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a029b9f-d28b-41af-b400-15506591e866-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2a029b9f-d28b-41af-b400-15506591e866" (UID: "2a029b9f-d28b-41af-b400-15506591e866"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.223430 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a029b9f-d28b-41af-b400-15506591e866-kube-api-access-nxh47" (OuterVolumeSpecName: "kube-api-access-nxh47") pod "2a029b9f-d28b-41af-b400-15506591e866" (UID: "2a029b9f-d28b-41af-b400-15506591e866"). InnerVolumeSpecName "kube-api-access-nxh47". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.289821 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-scripts" (OuterVolumeSpecName: "scripts") pod "2a029b9f-d28b-41af-b400-15506591e866" (UID: "2a029b9f-d28b-41af-b400-15506591e866"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.303816 4814 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a029b9f-d28b-41af-b400-15506591e866-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.304035 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxh47\" (UniqueName: \"kubernetes.io/projected/2a029b9f-d28b-41af-b400-15506591e866-kube-api-access-nxh47\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.304047 4814 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2a029b9f-d28b-41af-b400-15506591e866-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.304055 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.405238 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2a029b9f-d28b-41af-b400-15506591e866" (UID: "2a029b9f-d28b-41af-b400-15506591e866"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.406368 4814 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.413442 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-86d756df4f-476sb"] Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.599802 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2a029b9f-d28b-41af-b400-15506591e866" (UID: "2a029b9f-d28b-41af-b400-15506591e866"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.613097 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.742963 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-config-data" (OuterVolumeSpecName: "config-data") pod "2a029b9f-d28b-41af-b400-15506591e866" (UID: "2a029b9f-d28b-41af-b400-15506591e866"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.817128 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2a029b9f-d28b-41af-b400-15506591e866","Type":"ContainerDied","Data":"11d51cfbdd02dd3db836f0ae60cc8ae82938e1a01893c1394f2db5675bd30bba"} Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.817182 4814 scope.go:117] "RemoveContainer" containerID="bee72efb9bdc072949d09f555869d9fe9b71a6c67fa86c3e7cee6c0091fc716b" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.817362 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.821112 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a029b9f-d28b-41af-b400-15506591e866-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.931735 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.957382 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.962044 4814 scope.go:117] "RemoveContainer" containerID="0b862f8c6bac865d13a32133d13d7f93dac606214863cfa8db354bbea0c98d0c" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.971102 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:39 crc kubenswrapper[4814]: E0122 05:37:39.971714 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="proxy-httpd" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.972488 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="proxy-httpd" Jan 22 05:37:39 crc kubenswrapper[4814]: E0122 05:37:39.972603 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="sg-core" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.972685 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="sg-core" Jan 22 05:37:39 crc kubenswrapper[4814]: E0122 05:37:39.972741 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="ceilometer-central-agent" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.972789 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="ceilometer-central-agent" Jan 22 05:37:39 crc kubenswrapper[4814]: E0122 05:37:39.972849 4814 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="ceilometer-notification-agent" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.972899 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="ceilometer-notification-agent" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.973135 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="ceilometer-notification-agent" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.973194 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="sg-core" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.973247 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="proxy-httpd" Jan 22 05:37:39 crc kubenswrapper[4814]: I0122 05:37:39.973302 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a029b9f-d28b-41af-b400-15506591e866" containerName="ceilometer-central-agent" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.015421 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.018843 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.019060 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.034684 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5d54fb775f-hkkfd"] Jan 22 05:37:40 crc kubenswrapper[4814]: W0122 05:37:40.046213 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b566de2_fe37_4017_8360_0d6d3c2ce4be.slice/crio-a449548325ca93ed6a20cb7456748621ae10bd7a70adf8ef8368cddfdc12eac8 WatchSource:0}: Error finding container a449548325ca93ed6a20cb7456748621ae10bd7a70adf8ef8368cddfdc12eac8: Status 404 returned error can't find the container with id a449548325ca93ed6a20cb7456748621ae10bd7a70adf8ef8368cddfdc12eac8 Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.066980 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.115855 4814 scope.go:117] "RemoveContainer" containerID="c9b104bef1e03ac41be6e83aa258814be1cf2cfa2fbcae5ebc32a04ffc9073c9" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.130471 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-log-httpd\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.130551 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzg2f\" (UniqueName: \"kubernetes.io/projected/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-kube-api-access-gzg2f\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.130611 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-config-data\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.130642 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.130665 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-run-httpd\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.130683 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.130741 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-scripts\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.172257 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-5bec-account-create-update-br287"] Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.221217 4814 scope.go:117] "RemoveContainer" containerID="ca744b5c40e9c00e28756235b6a42b73fa5ba575ca0c1827955d1411ef7d479b" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.232288 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-config-data\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.232314 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.232336 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-run-httpd\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.232354 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc 
kubenswrapper[4814]: I0122 05:37:40.232409 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-scripts\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.232442 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-log-httpd\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.232490 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzg2f\" (UniqueName: \"kubernetes.io/projected/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-kube-api-access-gzg2f\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.239161 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.239428 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-run-httpd\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.239647 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-config-data\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.239741 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-log-httpd\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.243786 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.246359 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-scripts\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.254939 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzg2f\" (UniqueName: \"kubernetes.io/projected/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-kube-api-access-gzg2f\") pod \"ceilometer-0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.272559 4814 reflector.go:368] Caches 
populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.358804 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a029b9f-d28b-41af-b400-15506591e866" path="/var/lib/kubelet/pods/2a029b9f-d28b-41af-b400-15506591e866/volumes" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.396236 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.797510 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-78869465b8-8rvmm"] Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.856916 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-e0c7-account-create-update-gr2s7"] Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.898920 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"d5ce9e4c-7e05-4880-8548-98a02abaf05e","Type":"ContainerStarted","Data":"137bf12055e68889ebed61f79af5841547ccf9371d5cd0ba87c6c707d71ece67"} Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.914322 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-86d756df4f-476sb" event={"ID":"c33be205-b621-4a36-8cd8-2e30db89269c","Type":"ContainerStarted","Data":"94d2d2e6e5c84e4df89b296ac368c20bd8791cf00485d2512a6ac7a7ee036321"} Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.918561 4814 generic.go:334] "Generic (PLEG): container finished" podID="814cb7df-caa1-49f3-a26a-7aea04b643e8" containerID="75a7c7e8f31dcb8dfbd2521495ef733d4f7376bac2f0ac0b0e7c09cf62550f7f" exitCode=137 Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.918682 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"814cb7df-caa1-49f3-a26a-7aea04b643e8","Type":"ContainerDied","Data":"75a7c7e8f31dcb8dfbd2521495ef733d4f7376bac2f0ac0b0e7c09cf62550f7f"} Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.925296 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d54fb775f-hkkfd" event={"ID":"4b566de2-fe37-4017-8360-0d6d3c2ce4be","Type":"ContainerStarted","Data":"a449548325ca93ed6a20cb7456748621ae10bd7a70adf8ef8368cddfdc12eac8"} Jan 22 05:37:40 crc kubenswrapper[4814]: W0122 05:37:40.928334 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8975f4f8_ca9b_483c_9627_266538c2036f.slice/crio-5aaa201163aec083a9dd6e795ec32f51192bf50e1dc57507c8ddc7c370b5b3ea WatchSource:0}: Error finding container 5aaa201163aec083a9dd6e795ec32f51192bf50e1dc57507c8ddc7c370b5b3ea: Status 404 returned error can't find the container with id 5aaa201163aec083a9dd6e795ec32f51192bf50e1dc57507c8ddc7c370b5b3ea Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.948446 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-5bec-account-create-update-br287" event={"ID":"9b993bf4-b353-4ca1-a01c-cfbae095a030","Type":"ContainerStarted","Data":"92b63d94cecd3fc08e467870add701dee2bc20eb579b6e3e79f689710dc87e7a"} Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.948488 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-5bec-account-create-update-br287" event={"ID":"9b993bf4-b353-4ca1-a01c-cfbae095a030","Type":"ContainerStarted","Data":"605ba44e364894e733387b874b236a3e787f41eef02e2045131f622f61ebb595"} Jan 22 05:37:40 crc kubenswrapper[4814]: 
I0122 05:37:40.953741 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 22 05:37:40 crc kubenswrapper[4814]: I0122 05:37:40.975516 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-ffwm5"] Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.072326 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-tcp2s"] Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.113974 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-7dddp"] Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.139703 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-a091-account-create-update-pkqsv"] Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.179914 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.181953 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-xlbkn"] Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.184211 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.491562159 podStartE2EDuration="16.184191375s" podCreationTimestamp="2026-01-22 05:37:25 +0000 UTC" firstStartedPulling="2026-01-22 05:37:26.016186449 +0000 UTC m=+1132.099674664" lastFinishedPulling="2026-01-22 05:37:38.708815675 +0000 UTC m=+1144.792303880" observedRunningTime="2026-01-22 05:37:40.929240972 +0000 UTC m=+1147.012729197" watchObservedRunningTime="2026-01-22 05:37:41.184191375 +0000 UTC m=+1147.267679590" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.213380 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-5bec-account-create-update-br287" podStartSLOduration=11.213361061 podStartE2EDuration="11.213361061s" podCreationTimestamp="2026-01-22 05:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:41.001085137 +0000 UTC m=+1147.084573352" watchObservedRunningTime="2026-01-22 05:37:41.213361061 +0000 UTC m=+1147.296849276" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.253170 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-7c6997bf6c-nnkzn"] Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.254327 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.292815 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-767ffccf56-cjdh4"] Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.294327 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.324492 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-5cf6ffb976-669wk"] Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.325741 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.362063 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-7c6997bf6c-nnkzn"] Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.385737 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-767ffccf56-cjdh4"] Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.408957 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fvcd\" (UniqueName: \"kubernetes.io/projected/09049d3c-1578-479a-b0e4-c853df37c918-kube-api-access-9fvcd\") pod \"heat-api-767ffccf56-cjdh4\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.409292 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfbnc\" (UniqueName: \"kubernetes.io/projected/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-kube-api-access-zfbnc\") pod \"heat-cfnapi-5cf6ffb976-669wk\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.409420 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-config-data-custom\") pod \"heat-cfnapi-5cf6ffb976-669wk\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.409492 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-config-data\") pod \"heat-cfnapi-5cf6ffb976-669wk\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.409567 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-combined-ca-bundle\") pod \"heat-cfnapi-5cf6ffb976-669wk\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.409653 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/72882366-342a-42e4-a12c-c40850b7358d-config-data-custom\") pod \"heat-engine-7c6997bf6c-nnkzn\" (UID: \"72882366-342a-42e4-a12c-c40850b7358d\") " pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.409752 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m7qd\" (UniqueName: \"kubernetes.io/projected/72882366-342a-42e4-a12c-c40850b7358d-kube-api-access-6m7qd\") pod \"heat-engine-7c6997bf6c-nnkzn\" (UID: \"72882366-342a-42e4-a12c-c40850b7358d\") " pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.409862 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/72882366-342a-42e4-a12c-c40850b7358d-config-data\") pod \"heat-engine-7c6997bf6c-nnkzn\" (UID: \"72882366-342a-42e4-a12c-c40850b7358d\") " pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.409934 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72882366-342a-42e4-a12c-c40850b7358d-combined-ca-bundle\") pod \"heat-engine-7c6997bf6c-nnkzn\" (UID: \"72882366-342a-42e4-a12c-c40850b7358d\") " pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.410001 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-config-data-custom\") pod \"heat-api-767ffccf56-cjdh4\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.410094 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-combined-ca-bundle\") pod \"heat-api-767ffccf56-cjdh4\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.410204 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-config-data\") pod \"heat-api-767ffccf56-cjdh4\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.411073 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6c6c4b85d7-qfbkp"] Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.427731 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5cf6ffb976-669wk"] Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.507535 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.512669 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-combined-ca-bundle\") pod \"heat-api-767ffccf56-cjdh4\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.512742 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-config-data\") pod \"heat-api-767ffccf56-cjdh4\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.512790 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fvcd\" (UniqueName: \"kubernetes.io/projected/09049d3c-1578-479a-b0e4-c853df37c918-kube-api-access-9fvcd\") pod \"heat-api-767ffccf56-cjdh4\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.512864 4814 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfbnc\" (UniqueName: \"kubernetes.io/projected/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-kube-api-access-zfbnc\") pod \"heat-cfnapi-5cf6ffb976-669wk\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.512908 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-config-data-custom\") pod \"heat-cfnapi-5cf6ffb976-669wk\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.512933 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-config-data\") pod \"heat-cfnapi-5cf6ffb976-669wk\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.512961 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-combined-ca-bundle\") pod \"heat-cfnapi-5cf6ffb976-669wk\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.512986 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/72882366-342a-42e4-a12c-c40850b7358d-config-data-custom\") pod \"heat-engine-7c6997bf6c-nnkzn\" (UID: \"72882366-342a-42e4-a12c-c40850b7358d\") " pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.513016 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6m7qd\" (UniqueName: \"kubernetes.io/projected/72882366-342a-42e4-a12c-c40850b7358d-kube-api-access-6m7qd\") pod \"heat-engine-7c6997bf6c-nnkzn\" (UID: \"72882366-342a-42e4-a12c-c40850b7358d\") " pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.513056 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72882366-342a-42e4-a12c-c40850b7358d-config-data\") pod \"heat-engine-7c6997bf6c-nnkzn\" (UID: \"72882366-342a-42e4-a12c-c40850b7358d\") " pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.513073 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72882366-342a-42e4-a12c-c40850b7358d-combined-ca-bundle\") pod \"heat-engine-7c6997bf6c-nnkzn\" (UID: \"72882366-342a-42e4-a12c-c40850b7358d\") " pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.513090 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-config-data-custom\") pod \"heat-api-767ffccf56-cjdh4\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.522664 4814 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-combined-ca-bundle\") pod \"heat-api-767ffccf56-cjdh4\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.523615 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/72882366-342a-42e4-a12c-c40850b7358d-config-data\") pod \"heat-engine-7c6997bf6c-nnkzn\" (UID: \"72882366-342a-42e4-a12c-c40850b7358d\") " pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.524404 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/72882366-342a-42e4-a12c-c40850b7358d-config-data-custom\") pod \"heat-engine-7c6997bf6c-nnkzn\" (UID: \"72882366-342a-42e4-a12c-c40850b7358d\") " pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.528301 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-config-data\") pod \"heat-cfnapi-5cf6ffb976-669wk\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.528934 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-combined-ca-bundle\") pod \"heat-cfnapi-5cf6ffb976-669wk\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.536461 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-config-data-custom\") pod \"heat-cfnapi-5cf6ffb976-669wk\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.537325 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-config-data-custom\") pod \"heat-api-767ffccf56-cjdh4\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.538400 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-config-data\") pod \"heat-api-767ffccf56-cjdh4\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.547356 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6m7qd\" (UniqueName: \"kubernetes.io/projected/72882366-342a-42e4-a12c-c40850b7358d-kube-api-access-6m7qd\") pod \"heat-engine-7c6997bf6c-nnkzn\" (UID: \"72882366-342a-42e4-a12c-c40850b7358d\") " pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.562012 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fvcd\" (UniqueName: 
\"kubernetes.io/projected/09049d3c-1578-479a-b0e4-c853df37c918-kube-api-access-9fvcd\") pod \"heat-api-767ffccf56-cjdh4\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.572298 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfbnc\" (UniqueName: \"kubernetes.io/projected/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-kube-api-access-zfbnc\") pod \"heat-cfnapi-5cf6ffb976-669wk\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.577505 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72882366-342a-42e4-a12c-c40850b7358d-combined-ca-bundle\") pod \"heat-engine-7c6997bf6c-nnkzn\" (UID: \"72882366-342a-42e4-a12c-c40850b7358d\") " pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.587173 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.784455 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.787253 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.921327 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-scripts\") pod \"814cb7df-caa1-49f3-a26a-7aea04b643e8\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.921623 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-config-data\") pod \"814cb7df-caa1-49f3-a26a-7aea04b643e8\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.921792 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-295sh\" (UniqueName: \"kubernetes.io/projected/814cb7df-caa1-49f3-a26a-7aea04b643e8-kube-api-access-295sh\") pod \"814cb7df-caa1-49f3-a26a-7aea04b643e8\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.921821 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-config-data-custom\") pod \"814cb7df-caa1-49f3-a26a-7aea04b643e8\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.921852 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/814cb7df-caa1-49f3-a26a-7aea04b643e8-logs\") pod \"814cb7df-caa1-49f3-a26a-7aea04b643e8\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.921872 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-combined-ca-bundle\") pod 
\"814cb7df-caa1-49f3-a26a-7aea04b643e8\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.921933 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/814cb7df-caa1-49f3-a26a-7aea04b643e8-etc-machine-id\") pod \"814cb7df-caa1-49f3-a26a-7aea04b643e8\" (UID: \"814cb7df-caa1-49f3-a26a-7aea04b643e8\") " Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.927874 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/814cb7df-caa1-49f3-a26a-7aea04b643e8-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "814cb7df-caa1-49f3-a26a-7aea04b643e8" (UID: "814cb7df-caa1-49f3-a26a-7aea04b643e8"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.930274 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/814cb7df-caa1-49f3-a26a-7aea04b643e8-logs" (OuterVolumeSpecName: "logs") pod "814cb7df-caa1-49f3-a26a-7aea04b643e8" (UID: "814cb7df-caa1-49f3-a26a-7aea04b643e8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.931141 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-scripts" (OuterVolumeSpecName: "scripts") pod "814cb7df-caa1-49f3-a26a-7aea04b643e8" (UID: "814cb7df-caa1-49f3-a26a-7aea04b643e8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.948764 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.957094 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/814cb7df-caa1-49f3-a26a-7aea04b643e8-kube-api-access-295sh" (OuterVolumeSpecName: "kube-api-access-295sh") pod "814cb7df-caa1-49f3-a26a-7aea04b643e8" (UID: "814cb7df-caa1-49f3-a26a-7aea04b643e8"). InnerVolumeSpecName "kube-api-access-295sh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.960125 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "814cb7df-caa1-49f3-a26a-7aea04b643e8" (UID: "814cb7df-caa1-49f3-a26a-7aea04b643e8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.963965 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.966272 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-78869465b8-8rvmm" event={"ID":"497ad917-0e7c-41f5-ba64-29a3f5e71ca3","Type":"ContainerStarted","Data":"a805cf5cb7ed60e2754757ce1938822c2ff601297a237af5315d6ebf89ea1123"} Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.993509 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tcp2s" event={"ID":"fee64fd2-2b4c-4b2c-9041-590c282c2e5b","Type":"ContainerStarted","Data":"eb0c548282539e794c3ee29d0f306e6c108d20301212e4cb421f3fecc21065a8"} Jan 22 05:37:41 crc kubenswrapper[4814]: I0122 05:37:41.993546 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tcp2s" event={"ID":"fee64fd2-2b4c-4b2c-9041-590c282c2e5b","Type":"ContainerStarted","Data":"2cdc8afc38e099d463b259725bfccac00c7385c97fbba2863e9e04778b9f02c7"} Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.004977 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"814cb7df-caa1-49f3-a26a-7aea04b643e8","Type":"ContainerDied","Data":"11841e8777cd9a54f78584ceee81b62cedcd3148a824d785c77712bfa3e68f9f"} Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.005025 4814 scope.go:117] "RemoveContainer" containerID="75a7c7e8f31dcb8dfbd2521495ef733d4f7376bac2f0ac0b0e7c09cf62550f7f" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.005157 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.008844 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" event={"ID":"c435e1dd-d906-4003-94cd-e78a57e0ab26","Type":"ContainerStarted","Data":"ff10da929050437b985e77412f8b20694a372a13f57c643a1623a35618c6a9cd"} Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.019466 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a091-account-create-update-pkqsv" event={"ID":"8975f4f8-ca9b-483c-9627-266538c2036f","Type":"ContainerStarted","Data":"5aaa201163aec083a9dd6e795ec32f51192bf50e1dc57507c8ddc7c370b5b3ea"} Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.023128 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-ovndb-tls-certs\") pod \"a693759e-220d-4f38-ab6d-e4e21b91fefa\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.023341 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47gql\" (UniqueName: \"kubernetes.io/projected/a693759e-220d-4f38-ab6d-e4e21b91fefa-kube-api-access-47gql\") pod \"a693759e-220d-4f38-ab6d-e4e21b91fefa\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.023385 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-config\") pod \"a693759e-220d-4f38-ab6d-e4e21b91fefa\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.023470 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-combined-ca-bundle\") pod \"a693759e-220d-4f38-ab6d-e4e21b91fefa\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.023511 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-httpd-config\") pod \"a693759e-220d-4f38-ab6d-e4e21b91fefa\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.024030 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-295sh\" (UniqueName: \"kubernetes.io/projected/814cb7df-caa1-49f3-a26a-7aea04b643e8-kube-api-access-295sh\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.024042 4814 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.024051 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/814cb7df-caa1-49f3-a26a-7aea04b643e8-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.024060 4814 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/814cb7df-caa1-49f3-a26a-7aea04b643e8-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.024068 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.042573 4814 generic.go:334] "Generic (PLEG): container finished" podID="a693759e-220d-4f38-ab6d-e4e21b91fefa" containerID="5c62d3c7a1029bbde8e91169e8849a6b7b7312fc0e65d174a1704ba0946011d9" exitCode=0 Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.043862 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-787f7bb69b-kz5kq" event={"ID":"a693759e-220d-4f38-ab6d-e4e21b91fefa","Type":"ContainerDied","Data":"5c62d3c7a1029bbde8e91169e8849a6b7b7312fc0e65d174a1704ba0946011d9"} Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.044023 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-787f7bb69b-kz5kq" event={"ID":"a693759e-220d-4f38-ab6d-e4e21b91fefa","Type":"ContainerDied","Data":"6c619aca2255e0d51789df55bb94de81848a16ecf122ba79589d5de73f2bf624"} Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.044369 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-787f7bb69b-kz5kq" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.046490 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" event={"ID":"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e","Type":"ContainerStarted","Data":"9e65b13d22ce7dc11b8da62cdc6fccaf093fbaf2fcc26bd29e7a397189f6c036"} Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.048420 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "a693759e-220d-4f38-ab6d-e4e21b91fefa" (UID: "a693759e-220d-4f38-ab6d-e4e21b91fefa"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.050923 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a693759e-220d-4f38-ab6d-e4e21b91fefa-kube-api-access-47gql" (OuterVolumeSpecName: "kube-api-access-47gql") pod "a693759e-220d-4f38-ab6d-e4e21b91fefa" (UID: "a693759e-220d-4f38-ab6d-e4e21b91fefa"). InnerVolumeSpecName "kube-api-access-47gql". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.064943 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" event={"ID":"316037aa-fe14-4391-b010-8e0964a4758a","Type":"ContainerStarted","Data":"5c396839160c876e96e9a99987e882affad4d650ec4317d7c239fc5df5f8afe6"} Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.086675 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "814cb7df-caa1-49f3-a26a-7aea04b643e8" (UID: "814cb7df-caa1-49f3-a26a-7aea04b643e8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.087550 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xlbkn" event={"ID":"347d9325-3d74-4146-94fe-c469e83043c9","Type":"ContainerStarted","Data":"f94a03c091b442bbeef32e75d115d67ccf90cc28437dc4670effa06458ebc96f"} Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.093604 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-tcp2s" podStartSLOduration=12.09358764 podStartE2EDuration="12.09358764s" podCreationTimestamp="2026-01-22 05:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:42.010834393 +0000 UTC m=+1148.094322608" watchObservedRunningTime="2026-01-22 05:37:42.09358764 +0000 UTC m=+1148.177075855" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.103037 4814 generic.go:334] "Generic (PLEG): container finished" podID="9b993bf4-b353-4ca1-a01c-cfbae095a030" containerID="92b63d94cecd3fc08e467870add701dee2bc20eb579b6e3e79f689710dc87e7a" exitCode=0 Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.103140 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-5bec-account-create-update-br287" event={"ID":"9b993bf4-b353-4ca1-a01c-cfbae095a030","Type":"ContainerDied","Data":"92b63d94cecd3fc08e467870add701dee2bc20eb579b6e3e79f689710dc87e7a"} Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.104842 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0dee27-7ce9-4f7a-9e69-d3da832700c0","Type":"ContainerStarted","Data":"db4a93f9650008bc14c06791ddf9ef0ba4894fc895c5ec6021c2b02d9f8c40fc"} Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.113431 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7dddp" event={"ID":"67b4bdd2-9667-497f-a85c-25d2c479e713","Type":"ContainerStarted","Data":"202c60226d65cbe1618a05be211f44e1391c07cc9e084af6e342a4232a6b4113"} Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.125946 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47gql\" (UniqueName: \"kubernetes.io/projected/a693759e-220d-4f38-ab6d-e4e21b91fefa-kube-api-access-47gql\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.125973 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.125983 4814 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.205070 4814 scope.go:117] "RemoveContainer" containerID="009bb59520e0be6831e77037620b76c77872439380d6335873339423a398b574" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.301484 4814 scope.go:117] "RemoveContainer" containerID="cf4ae4a88dfc1c5930bec18ff9596582fff0a4d31959829a71c3e9d08dc3a684" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.481470 4814 scope.go:117] "RemoveContainer" containerID="5c62d3c7a1029bbde8e91169e8849a6b7b7312fc0e65d174a1704ba0946011d9" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.537410 4814 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-config-data" (OuterVolumeSpecName: "config-data") pod "814cb7df-caa1-49f3-a26a-7aea04b643e8" (UID: "814cb7df-caa1-49f3-a26a-7aea04b643e8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.545730 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/814cb7df-caa1-49f3-a26a-7aea04b643e8-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:42 crc kubenswrapper[4814]: W0122 05:37:42.587151 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09049d3c_1578_479a_b0e4_c853df37c918.slice/crio-15cc7e2ecc2fd4f8e3b395de55cf251c9f7aec819224dfc0fa6d526b3f38f3c1 WatchSource:0}: Error finding container 15cc7e2ecc2fd4f8e3b395de55cf251c9f7aec819224dfc0fa6d526b3f38f3c1: Status 404 returned error can't find the container with id 15cc7e2ecc2fd4f8e3b395de55cf251c9f7aec819224dfc0fa6d526b3f38f3c1 Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.807510 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-config" (OuterVolumeSpecName: "config") pod "a693759e-220d-4f38-ab6d-e4e21b91fefa" (UID: "a693759e-220d-4f38-ab6d-e4e21b91fefa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.810800 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-config\") pod \"a693759e-220d-4f38-ab6d-e4e21b91fefa\" (UID: \"a693759e-220d-4f38-ab6d-e4e21b91fefa\") " Jan 22 05:37:42 crc kubenswrapper[4814]: W0122 05:37:42.811002 4814 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/a693759e-220d-4f38-ab6d-e4e21b91fefa/volumes/kubernetes.io~secret/config Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.811012 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-config" (OuterVolumeSpecName: "config") pod "a693759e-220d-4f38-ab6d-e4e21b91fefa" (UID: "a693759e-220d-4f38-ab6d-e4e21b91fefa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.811438 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.975160 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a693759e-220d-4f38-ab6d-e4e21b91fefa" (UID: "a693759e-220d-4f38-ab6d-e4e21b91fefa"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:42 crc kubenswrapper[4814]: I0122 05:37:42.986727 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "a693759e-220d-4f38-ab6d-e4e21b91fefa" (UID: "a693759e-220d-4f38-ab6d-e4e21b91fefa"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.019972 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.020012 4814 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a693759e-220d-4f38-ab6d-e4e21b91fefa-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.045374 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-7c6997bf6c-nnkzn"] Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.045419 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-767ffccf56-cjdh4"] Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.045432 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5cf6ffb976-669wk"] Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.102796 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5fc797bd5d-f6wlm" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.151:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.151:8443: connect: connection refused" Jan 22 05:37:43 crc kubenswrapper[4814]: E0122 05:37:43.105660 4814 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda693759e_220d_4f38_ab6d_e4e21b91fefa.slice/crio-6c619aca2255e0d51789df55bb94de81848a16ecf122ba79589d5de73f2bf624\": RecentStats: unable to find data in memory cache]" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.117184 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.163975 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.197714 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 22 05:37:43 crc kubenswrapper[4814]: E0122 05:37:43.198315 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a693759e-220d-4f38-ab6d-e4e21b91fefa" containerName="neutron-httpd" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.198333 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a693759e-220d-4f38-ab6d-e4e21b91fefa" containerName="neutron-httpd" Jan 22 05:37:43 crc kubenswrapper[4814]: E0122 05:37:43.198353 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a693759e-220d-4f38-ab6d-e4e21b91fefa" containerName="neutron-api" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.198361 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a693759e-220d-4f38-ab6d-e4e21b91fefa" containerName="neutron-api" Jan 22 
05:37:43 crc kubenswrapper[4814]: E0122 05:37:43.198376 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="814cb7df-caa1-49f3-a26a-7aea04b643e8" containerName="cinder-api-log" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.198384 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="814cb7df-caa1-49f3-a26a-7aea04b643e8" containerName="cinder-api-log" Jan 22 05:37:43 crc kubenswrapper[4814]: E0122 05:37:43.198399 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="814cb7df-caa1-49f3-a26a-7aea04b643e8" containerName="cinder-api" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.198406 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="814cb7df-caa1-49f3-a26a-7aea04b643e8" containerName="cinder-api" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.198688 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a693759e-220d-4f38-ab6d-e4e21b91fefa" containerName="neutron-api" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.198717 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a693759e-220d-4f38-ab6d-e4e21b91fefa" containerName="neutron-httpd" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.198730 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="814cb7df-caa1-49f3-a26a-7aea04b643e8" containerName="cinder-api" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.198744 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="814cb7df-caa1-49f3-a26a-7aea04b643e8" containerName="cinder-api-log" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.200039 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.207131 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.208122 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.208265 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.210964 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.225907 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-767ffccf56-cjdh4" event={"ID":"09049d3c-1578-479a-b0e4-c853df37c918","Type":"ContainerStarted","Data":"15cc7e2ecc2fd4f8e3b395de55cf251c9f7aec819224dfc0fa6d526b3f38f3c1"} Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.227073 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65ade639-11da-4715-ba61-82fc157fc3a8-logs\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.227182 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-config-data\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.227300 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbc5n\" (UniqueName: \"kubernetes.io/projected/65ade639-11da-4715-ba61-82fc157fc3a8-kube-api-access-hbc5n\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.227376 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.227489 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-config-data-custom\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.227563 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-scripts\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.227709 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65ade639-11da-4715-ba61-82fc157fc3a8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.227800 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-public-tls-certs\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.227891 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.229657 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7c6997bf6c-nnkzn" event={"ID":"72882366-342a-42e4-a12c-c40850b7358d","Type":"ContainerStarted","Data":"21dac7c878f43a734275cb98780c9a1fa1af3c2c5239ebddde9103c592a3a6b6"} Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.237661 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" event={"ID":"0ea7d2e2-9d56-4f96-bb9a-646c1191e800","Type":"ContainerStarted","Data":"7e17c850414cc4bc74d2582bac391f7c21319eba271c5cd15a70abac6f9d0a0a"} Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.246234 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-78869465b8-8rvmm" event={"ID":"497ad917-0e7c-41f5-ba64-29a3f5e71ca3","Type":"ContainerStarted","Data":"021a4a3806642e2de283adf3f065c23c55c61b1c89bbc7d08a1dfe1839fd0386"} Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 
05:37:43.246730 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.259651 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a091-account-create-update-pkqsv" event={"ID":"8975f4f8-ca9b-483c-9627-266538c2036f","Type":"ContainerStarted","Data":"13226bc0b5edea2b2fb1487c9460153283f100abd66290938c62b3e22ec4a84a"} Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.283847 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" event={"ID":"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e","Type":"ContainerStarted","Data":"e288cb59351fc005b9dba1c8df7b2bedd54c4b88133f7fe3756bfa12bbcd2cd6"} Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.300872 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-78869465b8-8rvmm" podStartSLOduration=9.300850116 podStartE2EDuration="9.300850116s" podCreationTimestamp="2026-01-22 05:37:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:43.264394631 +0000 UTC m=+1149.347882846" watchObservedRunningTime="2026-01-22 05:37:43.300850116 +0000 UTC m=+1149.384338331" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.306348 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xlbkn" event={"ID":"347d9325-3d74-4146-94fe-c469e83043c9","Type":"ContainerStarted","Data":"3bd841d6331c4c9c4b6db623e622e282424b2a2bf7294bc87b9a3559e4574fcc"} Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.312350 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-a091-account-create-update-pkqsv" podStartSLOduration=13.312335766 podStartE2EDuration="13.312335766s" podCreationTimestamp="2026-01-22 05:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:43.302429036 +0000 UTC m=+1149.385917251" watchObservedRunningTime="2026-01-22 05:37:43.312335766 +0000 UTC m=+1149.395823981" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.323320 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7dddp" event={"ID":"67b4bdd2-9667-497f-a85c-25d2c479e713","Type":"ContainerStarted","Data":"d2a0cb12391ffc95eec010198852e33f884bb4a62d58fc1194b37b10e2546a0b"} Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.323476 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" podStartSLOduration=13.323459895 podStartE2EDuration="13.323459895s" podCreationTimestamp="2026-01-22 05:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:43.318971285 +0000 UTC m=+1149.402459500" watchObservedRunningTime="2026-01-22 05:37:43.323459895 +0000 UTC m=+1149.406948110" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.353468 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-config-data-custom\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc 
kubenswrapper[4814]: I0122 05:37:43.353522 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-scripts\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.353571 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65ade639-11da-4715-ba61-82fc157fc3a8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.353616 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-public-tls-certs\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.353661 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.353764 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65ade639-11da-4715-ba61-82fc157fc3a8-logs\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.353789 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-config-data\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.353860 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbc5n\" (UniqueName: \"kubernetes.io/projected/65ade639-11da-4715-ba61-82fc157fc3a8-kube-api-access-hbc5n\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.353908 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.354689 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/65ade639-11da-4715-ba61-82fc157fc3a8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.358083 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-scripts\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 
05:37:43.359038 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.359795 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65ade639-11da-4715-ba61-82fc157fc3a8-logs\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.361339 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.363128 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-config-data-custom\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.399796 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbc5n\" (UniqueName: \"kubernetes.io/projected/65ade639-11da-4715-ba61-82fc157fc3a8-kube-api-access-hbc5n\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.427420 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-config-data\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.457252 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/65ade639-11da-4715-ba61-82fc157fc3a8-public-tls-certs\") pod \"cinder-api-0\" (UID: \"65ade639-11da-4715-ba61-82fc157fc3a8\") " pod="openstack/cinder-api-0" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.475886 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-787f7bb69b-kz5kq"] Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.518724 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-7dddp" podStartSLOduration=13.518701314 podStartE2EDuration="13.518701314s" podCreationTimestamp="2026-01-22 05:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:43.375138148 +0000 UTC m=+1149.458626373" watchObservedRunningTime="2026-01-22 05:37:43.518701314 +0000 UTC m=+1149.602189529" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.518971 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-787f7bb69b-kz5kq"] Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.563188 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-xlbkn" podStartSLOduration=14.56316718 podStartE2EDuration="14.56316718s" 
podCreationTimestamp="2026-01-22 05:37:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:43.443498883 +0000 UTC m=+1149.526987098" watchObservedRunningTime="2026-01-22 05:37:43.56316718 +0000 UTC m=+1149.646655395" Jan 22 05:37:43 crc kubenswrapper[4814]: I0122 05:37:43.570791 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.237616 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-5bec-account-create-update-br287" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.296460 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hq2f\" (UniqueName: \"kubernetes.io/projected/9b993bf4-b353-4ca1-a01c-cfbae095a030-kube-api-access-7hq2f\") pod \"9b993bf4-b353-4ca1-a01c-cfbae095a030\" (UID: \"9b993bf4-b353-4ca1-a01c-cfbae095a030\") " Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.296593 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b993bf4-b353-4ca1-a01c-cfbae095a030-operator-scripts\") pod \"9b993bf4-b353-4ca1-a01c-cfbae095a030\" (UID: \"9b993bf4-b353-4ca1-a01c-cfbae095a030\") " Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.297168 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b993bf4-b353-4ca1-a01c-cfbae095a030-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9b993bf4-b353-4ca1-a01c-cfbae095a030" (UID: "9b993bf4-b353-4ca1-a01c-cfbae095a030"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.297740 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b993bf4-b353-4ca1-a01c-cfbae095a030-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.303448 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b993bf4-b353-4ca1-a01c-cfbae095a030-kube-api-access-7hq2f" (OuterVolumeSpecName: "kube-api-access-7hq2f") pod "9b993bf4-b353-4ca1-a01c-cfbae095a030" (UID: "9b993bf4-b353-4ca1-a01c-cfbae095a030"). InnerVolumeSpecName "kube-api-access-7hq2f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.370789 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="814cb7df-caa1-49f3-a26a-7aea04b643e8" path="/var/lib/kubelet/pods/814cb7df-caa1-49f3-a26a-7aea04b643e8/volumes" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.371506 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a693759e-220d-4f38-ab6d-e4e21b91fefa" path="/var/lib/kubelet/pods/a693759e-220d-4f38-ab6d-e4e21b91fefa/volumes" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.392345 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0dee27-7ce9-4f7a-9e69-d3da832700c0","Type":"ContainerStarted","Data":"a5253e7117ef8f392925ced945614d7132785d1e9916df8502dfc549e301299b"} Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.405003 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hq2f\" (UniqueName: \"kubernetes.io/projected/9b993bf4-b353-4ca1-a01c-cfbae095a030-kube-api-access-7hq2f\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.419948 4814 generic.go:334] "Generic (PLEG): container finished" podID="67b4bdd2-9667-497f-a85c-25d2c479e713" containerID="d2a0cb12391ffc95eec010198852e33f884bb4a62d58fc1194b37b10e2546a0b" exitCode=0 Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.420025 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7dddp" event={"ID":"67b4bdd2-9667-497f-a85c-25d2c479e713","Type":"ContainerDied","Data":"d2a0cb12391ffc95eec010198852e33f884bb4a62d58fc1194b37b10e2546a0b"} Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.428381 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-86d756df4f-476sb"] Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.441466 4814 generic.go:334] "Generic (PLEG): container finished" podID="fee64fd2-2b4c-4b2c-9041-590c282c2e5b" containerID="eb0c548282539e794c3ee29d0f306e6c108d20301212e4cb421f3fecc21065a8" exitCode=0 Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.441525 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tcp2s" event={"ID":"fee64fd2-2b4c-4b2c-9041-590c282c2e5b","Type":"ContainerDied","Data":"eb0c548282539e794c3ee29d0f306e6c108d20301212e4cb421f3fecc21065a8"} Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.450141 4814 generic.go:334] "Generic (PLEG): container finished" podID="347d9325-3d74-4146-94fe-c469e83043c9" containerID="3bd841d6331c4c9c4b6db623e622e282424b2a2bf7294bc87b9a3559e4574fcc" exitCode=0 Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.450233 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xlbkn" event={"ID":"347d9325-3d74-4146-94fe-c469e83043c9","Type":"ContainerDied","Data":"3bd841d6331c4c9c4b6db623e622e282424b2a2bf7294bc87b9a3559e4574fcc"} Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.455443 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5d54fb775f-hkkfd"] Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.464204 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-5bec-account-create-update-br287" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.464811 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-5bec-account-create-update-br287" event={"ID":"9b993bf4-b353-4ca1-a01c-cfbae095a030","Type":"ContainerDied","Data":"605ba44e364894e733387b874b236a3e787f41eef02e2045131f622f61ebb595"} Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.464857 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="605ba44e364894e733387b874b236a3e787f41eef02e2045131f622f61ebb595" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.493005 4814 generic.go:334] "Generic (PLEG): container finished" podID="c435e1dd-d906-4003-94cd-e78a57e0ab26" containerID="a8c607f3d73e4c18355c67a40bfc4e5356343247d60471747d4f5f31e2d89459" exitCode=0 Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.493991 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-d5868ff97-qmvxn"] Jan 22 05:37:44 crc kubenswrapper[4814]: E0122 05:37:44.494351 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b993bf4-b353-4ca1-a01c-cfbae095a030" containerName="mariadb-account-create-update" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.494369 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b993bf4-b353-4ca1-a01c-cfbae095a030" containerName="mariadb-account-create-update" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.494563 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b993bf4-b353-4ca1-a01c-cfbae095a030" containerName="mariadb-account-create-update" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.495089 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" event={"ID":"c435e1dd-d906-4003-94cd-e78a57e0ab26","Type":"ContainerDied","Data":"a8c607f3d73e4c18355c67a40bfc4e5356343247d60471747d4f5f31e2d89459"} Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.495146 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.506236 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.506514 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.507078 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-config-data-custom\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.507109 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-public-tls-certs\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.507151 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-combined-ca-bundle\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.507166 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pw9v4\" (UniqueName: \"kubernetes.io/projected/935facb1-0683-4ef0-9706-95495b3d3ff1-kube-api-access-pw9v4\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.507215 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-config-data\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.507273 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-internal-tls-certs\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.510550 4814 generic.go:334] "Generic (PLEG): container finished" podID="8975f4f8-ca9b-483c-9627-266538c2036f" containerID="13226bc0b5edea2b2fb1487c9460153283f100abd66290938c62b3e22ec4a84a" exitCode=0 Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.510755 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a091-account-create-update-pkqsv" event={"ID":"8975f4f8-ca9b-483c-9627-266538c2036f","Type":"ContainerDied","Data":"13226bc0b5edea2b2fb1487c9460153283f100abd66290938c62b3e22ec4a84a"} Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.513312 4814 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openstack/heat-cfnapi-d5868ff97-qmvxn"] Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.514908 4814 generic.go:334] "Generic (PLEG): container finished" podID="5b09adfa-27dd-431e-a1ad-4ddd7f308c8e" containerID="e288cb59351fc005b9dba1c8df7b2bedd54c4b88133f7fe3756bfa12bbcd2cd6" exitCode=0 Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.514957 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" event={"ID":"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e","Type":"ContainerDied","Data":"e288cb59351fc005b9dba1c8df7b2bedd54c4b88133f7fe3756bfa12bbcd2cd6"} Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.517262 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" event={"ID":"316037aa-fe14-4391-b010-8e0964a4758a","Type":"ContainerStarted","Data":"63336a3c9fe71ce83425065f35311fd490b004e386d6acddb68191394996f7d8"} Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.557679 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-74f5fd4998-wjl2m"] Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.558994 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.562030 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.562280 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.579651 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-74f5fd4998-wjl2m"] Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.608045 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-internal-tls-certs\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.608125 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-public-tls-certs\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.608156 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-config-data-custom\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.608195 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-config-data-custom\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.608212 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-combined-ca-bundle\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.608239 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-public-tls-certs\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.608277 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-combined-ca-bundle\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.608294 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pw9v4\" (UniqueName: \"kubernetes.io/projected/935facb1-0683-4ef0-9706-95495b3d3ff1-kube-api-access-pw9v4\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.608331 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5jqj\" (UniqueName: \"kubernetes.io/projected/b571889b-5927-4dae-a503-3bc00e8511bf-kube-api-access-p5jqj\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.608356 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-config-data\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.608374 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-config-data\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.608408 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-internal-tls-certs\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.621030 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-config-data\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.631327 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-internal-tls-certs\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.643550 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pw9v4\" (UniqueName: \"kubernetes.io/projected/935facb1-0683-4ef0-9706-95495b3d3ff1-kube-api-access-pw9v4\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.657773 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-public-tls-certs\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.658614 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-combined-ca-bundle\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.665329 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/935facb1-0683-4ef0-9706-95495b3d3ff1-config-data-custom\") pod \"heat-cfnapi-d5868ff97-qmvxn\" (UID: \"935facb1-0683-4ef0-9706-95495b3d3ff1\") " pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.710188 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-public-tls-certs\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.710242 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-config-data-custom\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.710273 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-combined-ca-bundle\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.710815 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5jqj\" (UniqueName: \"kubernetes.io/projected/b571889b-5927-4dae-a503-3bc00e8511bf-kube-api-access-p5jqj\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.710851 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-config-data\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.710881 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-internal-tls-certs\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.714035 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-combined-ca-bundle\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.715594 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-config-data\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.728689 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-public-tls-certs\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.730175 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-config-data-custom\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.730785 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b571889b-5927-4dae-a503-3bc00e8511bf-internal-tls-certs\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.733474 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5jqj\" (UniqueName: \"kubernetes.io/projected/b571889b-5927-4dae-a503-3bc00e8511bf-kube-api-access-p5jqj\") pod \"heat-api-74f5fd4998-wjl2m\" (UID: \"b571889b-5927-4dae-a503-3bc00e8511bf\") " pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.821450 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:44 crc kubenswrapper[4814]: I0122 05:37:44.894549 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:45 crc kubenswrapper[4814]: I0122 05:37:45.256309 4814 scope.go:117] "RemoveContainer" containerID="cf4ae4a88dfc1c5930bec18ff9596582fff0a4d31959829a71c3e9d08dc3a684" Jan 22 05:37:45 crc kubenswrapper[4814]: E0122 05:37:45.256930 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf4ae4a88dfc1c5930bec18ff9596582fff0a4d31959829a71c3e9d08dc3a684\": container with ID starting with cf4ae4a88dfc1c5930bec18ff9596582fff0a4d31959829a71c3e9d08dc3a684 not found: ID does not exist" containerID="cf4ae4a88dfc1c5930bec18ff9596582fff0a4d31959829a71c3e9d08dc3a684" Jan 22 05:37:45 crc kubenswrapper[4814]: I0122 05:37:45.256959 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf4ae4a88dfc1c5930bec18ff9596582fff0a4d31959829a71c3e9d08dc3a684"} err="failed to get container status \"cf4ae4a88dfc1c5930bec18ff9596582fff0a4d31959829a71c3e9d08dc3a684\": rpc error: code = NotFound desc = could not find container \"cf4ae4a88dfc1c5930bec18ff9596582fff0a4d31959829a71c3e9d08dc3a684\": container with ID starting with cf4ae4a88dfc1c5930bec18ff9596582fff0a4d31959829a71c3e9d08dc3a684 not found: ID does not exist" Jan 22 05:37:45 crc kubenswrapper[4814]: I0122 05:37:45.256979 4814 scope.go:117] "RemoveContainer" containerID="5c62d3c7a1029bbde8e91169e8849a6b7b7312fc0e65d174a1704ba0946011d9" Jan 22 05:37:45 crc kubenswrapper[4814]: E0122 05:37:45.257162 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c62d3c7a1029bbde8e91169e8849a6b7b7312fc0e65d174a1704ba0946011d9\": container with ID starting with 5c62d3c7a1029bbde8e91169e8849a6b7b7312fc0e65d174a1704ba0946011d9 not found: ID does not exist" containerID="5c62d3c7a1029bbde8e91169e8849a6b7b7312fc0e65d174a1704ba0946011d9" Jan 22 05:37:45 crc kubenswrapper[4814]: I0122 05:37:45.257184 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c62d3c7a1029bbde8e91169e8849a6b7b7312fc0e65d174a1704ba0946011d9"} err="failed to get container status \"5c62d3c7a1029bbde8e91169e8849a6b7b7312fc0e65d174a1704ba0946011d9\": rpc error: code = NotFound desc = could not find container \"5c62d3c7a1029bbde8e91169e8849a6b7b7312fc0e65d174a1704ba0946011d9\": container with ID starting with 5c62d3c7a1029bbde8e91169e8849a6b7b7312fc0e65d174a1704ba0946011d9 not found: ID does not exist" Jan 22 05:37:45 crc kubenswrapper[4814]: I0122 05:37:45.560994 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7c6997bf6c-nnkzn" event={"ID":"72882366-342a-42e4-a12c-c40850b7358d","Type":"ContainerStarted","Data":"07b6c5da8382a2dd56c430d4ff4bd5bd1b56a98df8af17cc5d76c1146002b896"} Jan 22 05:37:45 crc kubenswrapper[4814]: I0122 05:37:45.562787 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:37:45 crc kubenswrapper[4814]: I0122 05:37:45.601378 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-7c6997bf6c-nnkzn" podStartSLOduration=5.601358438 podStartE2EDuration="5.601358438s" podCreationTimestamp="2026-01-22 05:37:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:45.593686667 +0000 UTC m=+1151.677174882" watchObservedRunningTime="2026-01-22 
05:37:45.601358438 +0000 UTC m=+1151.684846653" Jan 22 05:37:45 crc kubenswrapper[4814]: I0122 05:37:45.924365 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 22 05:37:46 crc kubenswrapper[4814]: W0122 05:37:46.095753 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65ade639_11da_4715_ba61_82fc157fc3a8.slice/crio-0fd36d6b6f8e6ee5868eefa365c342c7e5384abe0ebfe1045e83ff48ae64820c WatchSource:0}: Error finding container 0fd36d6b6f8e6ee5868eefa365c342c7e5384abe0ebfe1045e83ff48ae64820c: Status 404 returned error can't find the container with id 0fd36d6b6f8e6ee5868eefa365c342c7e5384abe0ebfe1045e83ff48ae64820c Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.458031 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-a091-account-create-update-pkqsv" Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.472699 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vh8v9\" (UniqueName: \"kubernetes.io/projected/8975f4f8-ca9b-483c-9627-266538c2036f-kube-api-access-vh8v9\") pod \"8975f4f8-ca9b-483c-9627-266538c2036f\" (UID: \"8975f4f8-ca9b-483c-9627-266538c2036f\") " Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.472805 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8975f4f8-ca9b-483c-9627-266538c2036f-operator-scripts\") pod \"8975f4f8-ca9b-483c-9627-266538c2036f\" (UID: \"8975f4f8-ca9b-483c-9627-266538c2036f\") " Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.473756 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8975f4f8-ca9b-483c-9627-266538c2036f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8975f4f8-ca9b-483c-9627-266538c2036f" (UID: "8975f4f8-ca9b-483c-9627-266538c2036f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.559359 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8975f4f8-ca9b-483c-9627-266538c2036f-kube-api-access-vh8v9" (OuterVolumeSpecName: "kube-api-access-vh8v9") pod "8975f4f8-ca9b-483c-9627-266538c2036f" (UID: "8975f4f8-ca9b-483c-9627-266538c2036f"). InnerVolumeSpecName "kube-api-access-vh8v9". 
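
The paired E/I records above ("ContainerStatus from runtime service failed" followed by "DeleteContainer returned error") are a benign race in container garbage collection: after "RemoveContainer" the kubelet asks CRI-O for a final status, but the container is already gone, so the gRPC call returns code NotFound. The cAdvisor warning ("Failed to process watch event ... Status 404") is the same race on the cgroup side, where a crio-<id> scope shows up under /kubepods.slice before the runtime can answer for it. A rough Python filter for triaging such a log; "kubelet.log" is a placeholder path and the substrings are copied from the records above:

    import re

    # Keep only error/warning records that are NOT the benign NotFound noise
    # produced while the kubelet garbage-collects already-deleted containers.
    SEVERITY = re.compile(r"\b[EW]\d{4} ")          # klog E0122 / W0122 markers
    BENIGN = ("could not find container", "Error finding container")

    with open("kubelet.log") as f:                  # placeholder path
        for line in f:
            if SEVERITY.search(line) and not any(s in line for s in BENIGN):
                print(line.rstrip())
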
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.587784 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vh8v9\" (UniqueName: \"kubernetes.io/projected/8975f4f8-ca9b-483c-9627-266538c2036f-kube-api-access-vh8v9\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.587816 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8975f4f8-ca9b-483c-9627-266538c2036f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.644550 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"65ade639-11da-4715-ba61-82fc157fc3a8","Type":"ContainerStarted","Data":"0fd36d6b6f8e6ee5868eefa365c342c7e5384abe0ebfe1045e83ff48ae64820c"} Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.672498 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" event={"ID":"c435e1dd-d906-4003-94cd-e78a57e0ab26","Type":"ContainerStarted","Data":"f86ec37b174f5629fedeb750d0c34c62dbd8e515b941b2b20d00be9d08c2ab0e"} Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.673461 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.735353 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" podStartSLOduration=12.735338623 podStartE2EDuration="12.735338623s" podCreationTimestamp="2026-01-22 05:37:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:46.734325181 +0000 UTC m=+1152.817813396" watchObservedRunningTime="2026-01-22 05:37:46.735338623 +0000 UTC m=+1152.818826828" Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.742453 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a091-account-create-update-pkqsv" event={"ID":"8975f4f8-ca9b-483c-9627-266538c2036f","Type":"ContainerDied","Data":"5aaa201163aec083a9dd6e795ec32f51192bf50e1dc57507c8ddc7c370b5b3ea"} Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.742496 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5aaa201163aec083a9dd6e795ec32f51192bf50e1dc57507c8ddc7c370b5b3ea" Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.742562 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-a091-account-create-update-pkqsv" Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.881747 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-d5868ff97-qmvxn"] Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.895602 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-74f5fd4998-wjl2m"] Jan 22 05:37:46 crc kubenswrapper[4814]: W0122 05:37:46.919381 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb571889b_5927_4dae_a503_3bc00e8511bf.slice/crio-8caa5a942a70ddf725251af5c883a37236f09b788cb7318f0235141bc9745bd6 WatchSource:0}: Error finding container 8caa5a942a70ddf725251af5c883a37236f09b788cb7318f0235141bc9745bd6: Status 404 returned error can't find the container with id 8caa5a942a70ddf725251af5c883a37236f09b788cb7318f0235141bc9745bd6 Jan 22 05:37:46 crc kubenswrapper[4814]: I0122 05:37:46.930218 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-xlbkn" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.049785 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-tcp2s" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.049926 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.050252 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-7dddp" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.051254 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.051271 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.051290 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" event={"ID":"316037aa-fe14-4391-b010-8e0964a4758a","Type":"ContainerStarted","Data":"b396bc4fe6489f7bd360f165772ab290747af5d0079c35ab8fa85cf4d8342007"} Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.059893 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" event={"ID":"0ea7d2e2-9d56-4f96-bb9a-646c1191e800","Type":"ContainerStarted","Data":"f270bbf569fa0431c6debbb7a11052f84d38ac1bd228fc4aa8e2ee545d73a224"} Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.066739 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.081548 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" podStartSLOduration=17.08153036 podStartE2EDuration="17.08153036s" podCreationTimestamp="2026-01-22 05:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:47.070955258 +0000 UTC m=+1153.154443473" watchObservedRunningTime="2026-01-22 05:37:47.08153036 +0000 UTC m=+1153.165018575" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.162052 4814 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-gl6kj\" (UniqueName: \"kubernetes.io/projected/67b4bdd2-9667-497f-a85c-25d2c479e713-kube-api-access-gl6kj\") pod \"67b4bdd2-9667-497f-a85c-25d2c479e713\" (UID: \"67b4bdd2-9667-497f-a85c-25d2c479e713\") " Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.162092 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjpgd\" (UniqueName: \"kubernetes.io/projected/fee64fd2-2b4c-4b2c-9041-590c282c2e5b-kube-api-access-hjpgd\") pod \"fee64fd2-2b4c-4b2c-9041-590c282c2e5b\" (UID: \"fee64fd2-2b4c-4b2c-9041-590c282c2e5b\") " Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.162159 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smkdj\" (UniqueName: \"kubernetes.io/projected/5b09adfa-27dd-431e-a1ad-4ddd7f308c8e-kube-api-access-smkdj\") pod \"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e\" (UID: \"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e\") " Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.162196 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fee64fd2-2b4c-4b2c-9041-590c282c2e5b-operator-scripts\") pod \"fee64fd2-2b4c-4b2c-9041-590c282c2e5b\" (UID: \"fee64fd2-2b4c-4b2c-9041-590c282c2e5b\") " Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.162256 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/347d9325-3d74-4146-94fe-c469e83043c9-operator-scripts\") pod \"347d9325-3d74-4146-94fe-c469e83043c9\" (UID: \"347d9325-3d74-4146-94fe-c469e83043c9\") " Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.162315 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67b4bdd2-9667-497f-a85c-25d2c479e713-operator-scripts\") pod \"67b4bdd2-9667-497f-a85c-25d2c479e713\" (UID: \"67b4bdd2-9667-497f-a85c-25d2c479e713\") " Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.162368 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b09adfa-27dd-431e-a1ad-4ddd7f308c8e-operator-scripts\") pod \"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e\" (UID: \"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e\") " Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.162664 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2h7v\" (UniqueName: \"kubernetes.io/projected/347d9325-3d74-4146-94fe-c469e83043c9-kube-api-access-z2h7v\") pod \"347d9325-3d74-4146-94fe-c469e83043c9\" (UID: \"347d9325-3d74-4146-94fe-c469e83043c9\") " Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.162845 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b09adfa-27dd-431e-a1ad-4ddd7f308c8e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5b09adfa-27dd-431e-a1ad-4ddd7f308c8e" (UID: "5b09adfa-27dd-431e-a1ad-4ddd7f308c8e"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.163673 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fee64fd2-2b4c-4b2c-9041-590c282c2e5b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fee64fd2-2b4c-4b2c-9041-590c282c2e5b" (UID: "fee64fd2-2b4c-4b2c-9041-590c282c2e5b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.164330 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fee64fd2-2b4c-4b2c-9041-590c282c2e5b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.164354 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b09adfa-27dd-431e-a1ad-4ddd7f308c8e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.164461 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/347d9325-3d74-4146-94fe-c469e83043c9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "347d9325-3d74-4146-94fe-c469e83043c9" (UID: "347d9325-3d74-4146-94fe-c469e83043c9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.164811 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67b4bdd2-9667-497f-a85c-25d2c479e713-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "67b4bdd2-9667-497f-a85c-25d2c479e713" (UID: "67b4bdd2-9667-497f-a85c-25d2c479e713"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.175143 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/347d9325-3d74-4146-94fe-c469e83043c9-kube-api-access-z2h7v" (OuterVolumeSpecName: "kube-api-access-z2h7v") pod "347d9325-3d74-4146-94fe-c469e83043c9" (UID: "347d9325-3d74-4146-94fe-c469e83043c9"). InnerVolumeSpecName "kube-api-access-z2h7v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.175321 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fee64fd2-2b4c-4b2c-9041-590c282c2e5b-kube-api-access-hjpgd" (OuterVolumeSpecName: "kube-api-access-hjpgd") pod "fee64fd2-2b4c-4b2c-9041-590c282c2e5b" (UID: "fee64fd2-2b4c-4b2c-9041-590c282c2e5b"). InnerVolumeSpecName "kube-api-access-hjpgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.177229 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67b4bdd2-9667-497f-a85c-25d2c479e713-kube-api-access-gl6kj" (OuterVolumeSpecName: "kube-api-access-gl6kj") pod "67b4bdd2-9667-497f-a85c-25d2c479e713" (UID: "67b4bdd2-9667-497f-a85c-25d2c479e713"). InnerVolumeSpecName "kube-api-access-gl6kj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.196293 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b09adfa-27dd-431e-a1ad-4ddd7f308c8e-kube-api-access-smkdj" (OuterVolumeSpecName: "kube-api-access-smkdj") pod "5b09adfa-27dd-431e-a1ad-4ddd7f308c8e" (UID: "5b09adfa-27dd-431e-a1ad-4ddd7f308c8e"). InnerVolumeSpecName "kube-api-access-smkdj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.212846 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" podStartSLOduration=3.445739663 podStartE2EDuration="6.212827291s" podCreationTimestamp="2026-01-22 05:37:41 +0000 UTC" firstStartedPulling="2026-01-22 05:37:42.764792639 +0000 UTC m=+1148.848280854" lastFinishedPulling="2026-01-22 05:37:45.531880277 +0000 UTC m=+1151.615368482" observedRunningTime="2026-01-22 05:37:47.139018904 +0000 UTC m=+1153.222507119" watchObservedRunningTime="2026-01-22 05:37:47.212827291 +0000 UTC m=+1153.296315506" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.267240 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/347d9325-3d74-4146-94fe-c469e83043c9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.271924 4814 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67b4bdd2-9667-497f-a85c-25d2c479e713-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.271990 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2h7v\" (UniqueName: \"kubernetes.io/projected/347d9325-3d74-4146-94fe-c469e83043c9-kube-api-access-z2h7v\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.272069 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gl6kj\" (UniqueName: \"kubernetes.io/projected/67b4bdd2-9667-497f-a85c-25d2c479e713-kube-api-access-gl6kj\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.272128 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjpgd\" (UniqueName: \"kubernetes.io/projected/fee64fd2-2b4c-4b2c-9041-590c282c2e5b-kube-api-access-hjpgd\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.272179 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smkdj\" (UniqueName: \"kubernetes.io/projected/5b09adfa-27dd-431e-a1ad-4ddd7f308c8e-kube-api-access-smkdj\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:47 crc kubenswrapper[4814]: I0122 05:37:47.915234 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.088597 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-d5868ff97-qmvxn" event={"ID":"935facb1-0683-4ef0-9706-95495b3d3ff1","Type":"ContainerStarted","Data":"17c874d1b2e5f646543a49bbfcb56006e8988cfdff75c48ef7219f4ca00167b9"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.088899 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-d5868ff97-qmvxn" 
event={"ID":"935facb1-0683-4ef0-9706-95495b3d3ff1","Type":"ContainerStarted","Data":"383bb771483b705394f341419ea4305475b1da6f58f4dd902067770b7a8ed058"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.090254 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.118860 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"65ade639-11da-4715-ba61-82fc157fc3a8","Type":"ContainerStarted","Data":"1cd98ea6e8f4e4dfecc1d1f2c78ff689870de23e07a1ff735e8f019e3cf64c6e"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.135019 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.135393 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-d5868ff97-qmvxn" podStartSLOduration=4.135373469 podStartE2EDuration="4.135373469s" podCreationTimestamp="2026-01-22 05:37:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:48.121499484 +0000 UTC m=+1154.204987699" watchObservedRunningTime="2026-01-22 05:37:48.135373469 +0000 UTC m=+1154.218861684" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.136381 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e0c7-account-create-update-gr2s7" event={"ID":"5b09adfa-27dd-431e-a1ad-4ddd7f308c8e","Type":"ContainerDied","Data":"9e65b13d22ce7dc11b8da62cdc6fccaf093fbaf2fcc26bd29e7a397189f6c036"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.136437 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e65b13d22ce7dc11b8da62cdc6fccaf093fbaf2fcc26bd29e7a397189f6c036" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.156342 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-xlbkn" event={"ID":"347d9325-3d74-4146-94fe-c469e83043c9","Type":"ContainerDied","Data":"f94a03c091b442bbeef32e75d115d67ccf90cc28437dc4670effa06458ebc96f"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.156385 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f94a03c091b442bbeef32e75d115d67ccf90cc28437dc4670effa06458ebc96f" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.156477 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-xlbkn" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.161162 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-86d756df4f-476sb" event={"ID":"c33be205-b621-4a36-8cd8-2e30db89269c","Type":"ContainerStarted","Data":"1abb80514d9f1482b40d78c0972cfb2eadf6660f234b5101abdd61433632f619"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.161299 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-86d756df4f-476sb" podUID="c33be205-b621-4a36-8cd8-2e30db89269c" containerName="heat-cfnapi" containerID="cri-o://1abb80514d9f1482b40d78c0972cfb2eadf6660f234b5101abdd61433632f619" gracePeriod=60 Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.161876 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.178126 4814 generic.go:334] "Generic (PLEG): container finished" podID="09049d3c-1578-479a-b0e4-c853df37c918" containerID="738e174334e8ea351e96c44253f5f3aa5e85f4fe953067d27427d6aca29b2638" exitCode=1 Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.178287 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-767ffccf56-cjdh4" event={"ID":"09049d3c-1578-479a-b0e4-c853df37c918","Type":"ContainerDied","Data":"738e174334e8ea351e96c44253f5f3aa5e85f4fe953067d27427d6aca29b2638"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.178763 4814 scope.go:117] "RemoveContainer" containerID="738e174334e8ea351e96c44253f5f3aa5e85f4fe953067d27427d6aca29b2638" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.186125 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-86d756df4f-476sb" podStartSLOduration=8.53128053 podStartE2EDuration="14.186093071s" podCreationTimestamp="2026-01-22 05:37:34 +0000 UTC" firstStartedPulling="2026-01-22 05:37:39.743132091 +0000 UTC m=+1145.826620306" lastFinishedPulling="2026-01-22 05:37:45.397944632 +0000 UTC m=+1151.481432847" observedRunningTime="2026-01-22 05:37:48.178579126 +0000 UTC m=+1154.262067341" watchObservedRunningTime="2026-01-22 05:37:48.186093071 +0000 UTC m=+1154.269581276" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.208178 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-5d54fb775f-hkkfd" podUID="4b566de2-fe37-4017-8360-0d6d3c2ce4be" containerName="heat-api" containerID="cri-o://3ecedbe905f77e47f96804046f3175a9fab533370e6545eb80ef53f971d66d4e" gracePeriod=60 Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.208241 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d54fb775f-hkkfd" event={"ID":"4b566de2-fe37-4017-8360-0d6d3c2ce4be","Type":"ContainerStarted","Data":"3ecedbe905f77e47f96804046f3175a9fab533370e6545eb80ef53f971d66d4e"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.208279 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.242703 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-tcp2s" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.243342 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tcp2s" event={"ID":"fee64fd2-2b4c-4b2c-9041-590c282c2e5b","Type":"ContainerDied","Data":"2cdc8afc38e099d463b259725bfccac00c7385c97fbba2863e9e04778b9f02c7"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.243384 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2cdc8afc38e099d463b259725bfccac00c7385c97fbba2863e9e04778b9f02c7" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.250655 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-5d54fb775f-hkkfd" podStartSLOduration=8.81451587 podStartE2EDuration="14.250625367s" podCreationTimestamp="2026-01-22 05:37:34 +0000 UTC" firstStartedPulling="2026-01-22 05:37:40.058314224 +0000 UTC m=+1146.141802439" lastFinishedPulling="2026-01-22 05:37:45.494423721 +0000 UTC m=+1151.577911936" observedRunningTime="2026-01-22 05:37:48.238337152 +0000 UTC m=+1154.321825367" watchObservedRunningTime="2026-01-22 05:37:48.250625367 +0000 UTC m=+1154.334113582" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.272066 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-74f5fd4998-wjl2m" event={"ID":"b571889b-5927-4dae-a503-3bc00e8511bf","Type":"ContainerStarted","Data":"94ec149f71ac1808e02b790b396cd504fe20deb305e9e33c97c3f1b00c6511d3"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.272114 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-74f5fd4998-wjl2m" event={"ID":"b571889b-5927-4dae-a503-3bc00e8511bf","Type":"ContainerStarted","Data":"8caa5a942a70ddf725251af5c883a37236f09b788cb7318f0235141bc9745bd6"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.272154 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.278589 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0dee27-7ce9-4f7a-9e69-d3da832700c0","Type":"ContainerStarted","Data":"d26e1787b0504c211e6d7d060237afceff25a2fbd5971ff2e482e337eef36894"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.278621 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0dee27-7ce9-4f7a-9e69-d3da832700c0","Type":"ContainerStarted","Data":"c20b389415daba5f9f0780d02636d3f144e158feddca25f5171767535ab1943f"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.297338 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-7dddp" event={"ID":"67b4bdd2-9667-497f-a85c-25d2c479e713","Type":"ContainerDied","Data":"202c60226d65cbe1618a05be211f44e1391c07cc9e084af6e342a4232a6b4113"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.297376 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="202c60226d65cbe1618a05be211f44e1391c07cc9e084af6e342a4232a6b4113" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.297767 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-7dddp" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.298415 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-74f5fd4998-wjl2m" podStartSLOduration=4.298395187 podStartE2EDuration="4.298395187s" podCreationTimestamp="2026-01-22 05:37:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:48.29278612 +0000 UTC m=+1154.376274325" watchObservedRunningTime="2026-01-22 05:37:48.298395187 +0000 UTC m=+1154.381883402" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.301133 4814 generic.go:334] "Generic (PLEG): container finished" podID="0ea7d2e2-9d56-4f96-bb9a-646c1191e800" containerID="f270bbf569fa0431c6debbb7a11052f84d38ac1bd228fc4aa8e2ee545d73a224" exitCode=1 Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.302499 4814 scope.go:117] "RemoveContainer" containerID="f270bbf569fa0431c6debbb7a11052f84d38ac1bd228fc4aa8e2ee545d73a224" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.302835 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" event={"ID":"0ea7d2e2-9d56-4f96-bb9a-646c1191e800","Type":"ContainerDied","Data":"f270bbf569fa0431c6debbb7a11052f84d38ac1bd228fc4aa8e2ee545d73a224"} Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.334707 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" podUID="316037aa-fe14-4391-b010-8e0964a4758a" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.889794 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.963423 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-config-data\") pod \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.963554 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfcjv\" (UniqueName: \"kubernetes.io/projected/4b566de2-fe37-4017-8360-0d6d3c2ce4be-kube-api-access-wfcjv\") pod \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.963591 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-config-data-custom\") pod \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " Jan 22 05:37:48 crc kubenswrapper[4814]: I0122 05:37:48.963706 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-combined-ca-bundle\") pod \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\" (UID: \"4b566de2-fe37-4017-8360-0d6d3c2ce4be\") " Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.174125 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4b566de2-fe37-4017-8360-0d6d3c2ce4be" (UID: "4b566de2-fe37-4017-8360-0d6d3c2ce4be"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.202591 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b566de2-fe37-4017-8360-0d6d3c2ce4be" (UID: "4b566de2-fe37-4017-8360-0d6d3c2ce4be"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.206825 4814 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.206856 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.245231 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-config-data" (OuterVolumeSpecName: "config-data") pod "4b566de2-fe37-4017-8360-0d6d3c2ce4be" (UID: "4b566de2-fe37-4017-8360-0d6d3c2ce4be"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.279527 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b566de2-fe37-4017-8360-0d6d3c2ce4be-kube-api-access-wfcjv" (OuterVolumeSpecName: "kube-api-access-wfcjv") pod "4b566de2-fe37-4017-8360-0d6d3c2ce4be" (UID: "4b566de2-fe37-4017-8360-0d6d3c2ce4be"). InnerVolumeSpecName "kube-api-access-wfcjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.309945 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b566de2-fe37-4017-8360-0d6d3c2ce4be-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.309978 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfcjv\" (UniqueName: \"kubernetes.io/projected/4b566de2-fe37-4017-8360-0d6d3c2ce4be-kube-api-access-wfcjv\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.315014 4814 generic.go:334] "Generic (PLEG): container finished" podID="c33be205-b621-4a36-8cd8-2e30db89269c" containerID="1abb80514d9f1482b40d78c0972cfb2eadf6660f234b5101abdd61433632f619" exitCode=0 Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.315070 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-86d756df4f-476sb" event={"ID":"c33be205-b621-4a36-8cd8-2e30db89269c","Type":"ContainerDied","Data":"1abb80514d9f1482b40d78c0972cfb2eadf6660f234b5101abdd61433632f619"} Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.318550 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-767ffccf56-cjdh4" event={"ID":"09049d3c-1578-479a-b0e4-c853df37c918","Type":"ContainerStarted","Data":"9eb21294d559f752ffd90713b3fe430955cfcd6b1cf8d0b8b73fee6fc3c2d98e"} Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.319603 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.321374 4814 generic.go:334] "Generic (PLEG): container finished" podID="4b566de2-fe37-4017-8360-0d6d3c2ce4be" containerID="3ecedbe905f77e47f96804046f3175a9fab533370e6545eb80ef53f971d66d4e" exitCode=0 Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.321431 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d54fb775f-hkkfd" event={"ID":"4b566de2-fe37-4017-8360-0d6d3c2ce4be","Type":"ContainerDied","Data":"3ecedbe905f77e47f96804046f3175a9fab533370e6545eb80ef53f971d66d4e"} Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.321456 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5d54fb775f-hkkfd" event={"ID":"4b566de2-fe37-4017-8360-0d6d3c2ce4be","Type":"ContainerDied","Data":"a449548325ca93ed6a20cb7456748621ae10bd7a70adf8ef8368cddfdc12eac8"} Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.321472 4814 scope.go:117] "RemoveContainer" containerID="3ecedbe905f77e47f96804046f3175a9fab533370e6545eb80ef53f971d66d4e" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.321589 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5d54fb775f-hkkfd" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.334867 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-86d756df4f-476sb" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.335483 4814 generic.go:334] "Generic (PLEG): container finished" podID="0ea7d2e2-9d56-4f96-bb9a-646c1191e800" containerID="feb7f0e28d88e6c7394a02a9c395ca60f0949f65af49df72a68b661434bb3104" exitCode=1 Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.335712 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" event={"ID":"0ea7d2e2-9d56-4f96-bb9a-646c1191e800","Type":"ContainerDied","Data":"feb7f0e28d88e6c7394a02a9c395ca60f0949f65af49df72a68b661434bb3104"} Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.336242 4814 scope.go:117] "RemoveContainer" containerID="feb7f0e28d88e6c7394a02a9c395ca60f0949f65af49df72a68b661434bb3104" Jan 22 05:37:49 crc kubenswrapper[4814]: E0122 05:37:49.336458 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-5cf6ffb976-669wk_openstack(0ea7d2e2-9d56-4f96-bb9a-646c1191e800)\"" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" podUID="0ea7d2e2-9d56-4f96-bb9a-646c1191e800" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.349922 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-767ffccf56-cjdh4" podStartSLOduration=5.41980158 podStartE2EDuration="8.349906574s" podCreationTimestamp="2026-01-22 05:37:41 +0000 UTC" firstStartedPulling="2026-01-22 05:37:42.591724997 +0000 UTC m=+1148.675213202" lastFinishedPulling="2026-01-22 05:37:45.521829981 +0000 UTC m=+1151.605318196" observedRunningTime="2026-01-22 05:37:49.345284078 +0000 UTC m=+1155.428772293" watchObservedRunningTime="2026-01-22 05:37:49.349906574 +0000 UTC m=+1155.433394789" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.362474 4814 scope.go:117] "RemoveContainer" containerID="3ecedbe905f77e47f96804046f3175a9fab533370e6545eb80ef53f971d66d4e" Jan 22 05:37:49 crc kubenswrapper[4814]: E0122 05:37:49.363732 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ecedbe905f77e47f96804046f3175a9fab533370e6545eb80ef53f971d66d4e\": container with ID starting with 3ecedbe905f77e47f96804046f3175a9fab533370e6545eb80ef53f971d66d4e not found: ID does not exist" containerID="3ecedbe905f77e47f96804046f3175a9fab533370e6545eb80ef53f971d66d4e" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.363761 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ecedbe905f77e47f96804046f3175a9fab533370e6545eb80ef53f971d66d4e"} err="failed to get container status \"3ecedbe905f77e47f96804046f3175a9fab533370e6545eb80ef53f971d66d4e\": rpc error: code = NotFound desc = could not find container \"3ecedbe905f77e47f96804046f3175a9fab533370e6545eb80ef53f971d66d4e\": container with ID starting with 3ecedbe905f77e47f96804046f3175a9fab533370e6545eb80ef53f971d66d4e not found: ID does not exist" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.363783 4814 scope.go:117] "RemoveContainer" containerID="f270bbf569fa0431c6debbb7a11052f84d38ac1bd228fc4aa8e2ee545d73a224" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.411619 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zvcg\" (UniqueName: \"kubernetes.io/projected/c33be205-b621-4a36-8cd8-2e30db89269c-kube-api-access-9zvcg\") 
pod \"c33be205-b621-4a36-8cd8-2e30db89269c\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.411687 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-config-data\") pod \"c33be205-b621-4a36-8cd8-2e30db89269c\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.411739 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-config-data-custom\") pod \"c33be205-b621-4a36-8cd8-2e30db89269c\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.411849 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-combined-ca-bundle\") pod \"c33be205-b621-4a36-8cd8-2e30db89269c\" (UID: \"c33be205-b621-4a36-8cd8-2e30db89269c\") " Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.429320 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c33be205-b621-4a36-8cd8-2e30db89269c-kube-api-access-9zvcg" (OuterVolumeSpecName: "kube-api-access-9zvcg") pod "c33be205-b621-4a36-8cd8-2e30db89269c" (UID: "c33be205-b621-4a36-8cd8-2e30db89269c"). InnerVolumeSpecName "kube-api-access-9zvcg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.434965 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c33be205-b621-4a36-8cd8-2e30db89269c" (UID: "c33be205-b621-4a36-8cd8-2e30db89269c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.439736 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5d54fb775f-hkkfd"] Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.463371 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-5d54fb775f-hkkfd"] Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.473969 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c33be205-b621-4a36-8cd8-2e30db89269c" (UID: "c33be205-b621-4a36-8cd8-2e30db89269c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.487713 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-config-data" (OuterVolumeSpecName: "config-data") pod "c33be205-b621-4a36-8cd8-2e30db89269c" (UID: "c33be205-b621-4a36-8cd8-2e30db89269c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.513985 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.514016 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zvcg\" (UniqueName: \"kubernetes.io/projected/c33be205-b621-4a36-8cd8-2e30db89269c-kube-api-access-9zvcg\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.514030 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.514038 4814 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c33be205-b621-4a36-8cd8-2e30db89269c-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.614687 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.614755 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.614830 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.615764 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4cc634dfae0a47901cc979ba5b63d3858a39aa8e9b0382a2430471166dd22de7"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 05:37:49 crc kubenswrapper[4814]: I0122 05:37:49.615837 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://4cc634dfae0a47901cc979ba5b63d3858a39aa8e9b0382a2430471166dd22de7" gracePeriod=600 Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.347547 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="4cc634dfae0a47901cc979ba5b63d3858a39aa8e9b0382a2430471166dd22de7" exitCode=0 Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.361103 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="ceilometer-central-agent" containerID="cri-o://a5253e7117ef8f392925ced945614d7132785d1e9916df8502dfc549e301299b" gracePeriod=30 Jan 22 05:37:50 crc 
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.361150 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="proxy-httpd" containerID="cri-o://21903c3524abde4754710b0c6f7107cea6a4b6f7465e6d4192bc84402d6ce4fc" gracePeriod=30
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.361198 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="sg-core" containerID="cri-o://d26e1787b0504c211e6d7d060237afceff25a2fbd5971ff2e482e337eef36894" gracePeriod=30
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.361237 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="ceilometer-notification-agent" containerID="cri-o://c20b389415daba5f9f0780d02636d3f144e158feddca25f5171767535ab1943f" gracePeriod=30
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.361458 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b566de2-fe37-4017-8360-0d6d3c2ce4be" path="/var/lib/kubelet/pods/4b566de2-fe37-4017-8360-0d6d3c2ce4be/volumes"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.362150 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.362167 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"4cc634dfae0a47901cc979ba5b63d3858a39aa8e9b0382a2430471166dd22de7"}
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.362187 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0dee27-7ce9-4f7a-9e69-d3da832700c0","Type":"ContainerStarted","Data":"21903c3524abde4754710b0c6f7107cea6a4b6f7465e6d4192bc84402d6ce4fc"}
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.362211 4814 scope.go:117] "RemoveContainer" containerID="9500dee208774edd1316e9481891ac3158cca3bdb31ab2aefff48638b4f8e29b"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.365896 4814 scope.go:117] "RemoveContainer" containerID="feb7f0e28d88e6c7394a02a9c395ca60f0949f65af49df72a68b661434bb3104"
Jan 22 05:37:50 crc kubenswrapper[4814]: E0122 05:37:50.366216 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-5cf6ffb976-669wk_openstack(0ea7d2e2-9d56-4f96-bb9a-646c1191e800)\"" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" podUID="0ea7d2e2-9d56-4f96-bb9a-646c1191e800"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.377590 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-86d756df4f-476sb" event={"ID":"c33be205-b621-4a36-8cd8-2e30db89269c","Type":"ContainerDied","Data":"94d2d2e6e5c84e4df89b296ac368c20bd8791cf00485d2512a6ac7a7ee036321"}
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.377690 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-86d756df4f-476sb"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.392959 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"65ade639-11da-4715-ba61-82fc157fc3a8","Type":"ContainerStarted","Data":"19b19cdc0b500e26e1688f343dd54cc925e0f538c64817fc8a5f20e3dee997a0"}
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.393904 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.398157 4814 generic.go:334] "Generic (PLEG): container finished" podID="09049d3c-1578-479a-b0e4-c853df37c918" containerID="9eb21294d559f752ffd90713b3fe430955cfcd6b1cf8d0b8b73fee6fc3c2d98e" exitCode=1
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.398202 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-767ffccf56-cjdh4" event={"ID":"09049d3c-1578-479a-b0e4-c853df37c918","Type":"ContainerDied","Data":"9eb21294d559f752ffd90713b3fe430955cfcd6b1cf8d0b8b73fee6fc3c2d98e"}
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.398473 4814 scope.go:117] "RemoveContainer" containerID="9eb21294d559f752ffd90713b3fe430955cfcd6b1cf8d0b8b73fee6fc3c2d98e"
Jan 22 05:37:50 crc kubenswrapper[4814]: E0122 05:37:50.398664 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-767ffccf56-cjdh4_openstack(09049d3c-1578-479a-b0e4-c853df37c918)\"" pod="openstack/heat-api-767ffccf56-cjdh4" podUID="09049d3c-1578-479a-b0e4-c853df37c918"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.437401 4814 scope.go:117] "RemoveContainer" containerID="1abb80514d9f1482b40d78c0972cfb2eadf6660f234b5101abdd61433632f619"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.442474 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.443048409 podStartE2EDuration="11.442450937s" podCreationTimestamp="2026-01-22 05:37:39 +0000 UTC" firstStartedPulling="2026-01-22 05:37:41.490556442 +0000 UTC m=+1147.574044657" lastFinishedPulling="2026-01-22 05:37:49.48995897 +0000 UTC m=+1155.573447185" observedRunningTime="2026-01-22 05:37:50.42116758 +0000 UTC m=+1156.504655795" watchObservedRunningTime="2026-01-22 05:37:50.442450937 +0000 UTC m=+1156.525939152"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.470989 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=7.470974603 podStartE2EDuration="7.470974603s" podCreationTimestamp="2026-01-22 05:37:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:37:50.46608806 +0000 UTC m=+1156.549576275" watchObservedRunningTime="2026-01-22 05:37:50.470974603 +0000 UTC m=+1156.554462818"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.533752 4814 scope.go:117] "RemoveContainer" containerID="738e174334e8ea351e96c44253f5f3aa5e85f4fe953067d27427d6aca29b2638"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.533852 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-86d756df4f-476sb"]
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.551227 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-86d756df4f-476sb"]
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.906483 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-z4pzm"]
Jan 22 05:37:50 crc kubenswrapper[4814]: E0122 05:37:50.908607 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fee64fd2-2b4c-4b2c-9041-590c282c2e5b" containerName="mariadb-database-create"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.908641 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="fee64fd2-2b4c-4b2c-9041-590c282c2e5b" containerName="mariadb-database-create"
Jan 22 05:37:50 crc kubenswrapper[4814]: E0122 05:37:50.908658 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8975f4f8-ca9b-483c-9627-266538c2036f" containerName="mariadb-account-create-update"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.908664 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="8975f4f8-ca9b-483c-9627-266538c2036f" containerName="mariadb-account-create-update"
Jan 22 05:37:50 crc kubenswrapper[4814]: E0122 05:37:50.908677 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67b4bdd2-9667-497f-a85c-25d2c479e713" containerName="mariadb-database-create"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.908683 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="67b4bdd2-9667-497f-a85c-25d2c479e713" containerName="mariadb-database-create"
Jan 22 05:37:50 crc kubenswrapper[4814]: E0122 05:37:50.908692 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b566de2-fe37-4017-8360-0d6d3c2ce4be" containerName="heat-api"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.908698 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b566de2-fe37-4017-8360-0d6d3c2ce4be" containerName="heat-api"
Jan 22 05:37:50 crc kubenswrapper[4814]: E0122 05:37:50.908710 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="347d9325-3d74-4146-94fe-c469e83043c9" containerName="mariadb-database-create"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.908716 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="347d9325-3d74-4146-94fe-c469e83043c9" containerName="mariadb-database-create"
Jan 22 05:37:50 crc kubenswrapper[4814]: E0122 05:37:50.908727 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c33be205-b621-4a36-8cd8-2e30db89269c" containerName="heat-cfnapi"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.908733 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c33be205-b621-4a36-8cd8-2e30db89269c" containerName="heat-cfnapi"
Jan 22 05:37:50 crc kubenswrapper[4814]: E0122 05:37:50.908744 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b09adfa-27dd-431e-a1ad-4ddd7f308c8e" containerName="mariadb-account-create-update"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.908751 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b09adfa-27dd-431e-a1ad-4ddd7f308c8e" containerName="mariadb-account-create-update"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.908909 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="c33be205-b621-4a36-8cd8-2e30db89269c" containerName="heat-cfnapi"
Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.908922 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="347d9325-3d74-4146-94fe-c469e83043c9" containerName="mariadb-database-create"
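
The cpu_manager/state_mem burst above is housekeeping triggered by admitting the new nova-cell0-conductor-db-sync-z4pzm pod: before computing assignments, the CPU and memory managers drop per-container state left behind by pods that have since been deleted (the database-create jobs and the replaced heat pods). Each stale container yields a cpu_manager "RemoveStaleState: removing container" record (logged at E severity, although it appears to be expected cleanup rather than a fault), a state_mem "Deleted CPUSet assignment", and a matching memory_manager record. A sketch that groups the records to confirm both managers covered every stale container; "kubelet.log" is a placeholder path:

    import re
    from collections import defaultdict

    STALE = re.compile(
        r'(cpu_manager|memory_manager)\S*\] "RemoveStaleState[^"]*"'
        r' podUID="(?P<uid>[^"]+)" containerName="(?P<name>[^"]+)"'
    )

    seen = defaultdict(set)
    with open("kubelet.log") as f:                  # placeholder path
        for line in f:
            m = STALE.search(line)
            if m:
                seen[(m["uid"], m["name"])].add(m.group(1))

    for (uid, name), managers in sorted(seen.items()):
        print(uid, name, sorted(managers))          # expect both managers per container
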
podUID="fee64fd2-2b4c-4b2c-9041-590c282c2e5b" containerName="mariadb-database-create" Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.908944 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="67b4bdd2-9667-497f-a85c-25d2c479e713" containerName="mariadb-database-create" Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.908956 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="8975f4f8-ca9b-483c-9627-266538c2036f" containerName="mariadb-account-create-update" Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.908974 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b09adfa-27dd-431e-a1ad-4ddd7f308c8e" containerName="mariadb-account-create-update" Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.908987 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b566de2-fe37-4017-8360-0d6d3c2ce4be" containerName="heat-api" Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.909554 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.911284 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.911506 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-psbcp" Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.911745 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 22 05:37:50 crc kubenswrapper[4814]: I0122 05:37:50.942579 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-z4pzm"] Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.079379 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.081409 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-z4pzm\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.081462 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-scripts\") pod \"nova-cell0-conductor-db-sync-z4pzm\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.082178 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-config-data\") pod \"nova-cell0-conductor-db-sync-z4pzm\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.082497 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ncvr\" (UniqueName: \"kubernetes.io/projected/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-kube-api-access-5ncvr\") pod 
\"nova-cell0-conductor-db-sync-z4pzm\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.085646 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6c6c4b85d7-qfbkp" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.186725 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ncvr\" (UniqueName: \"kubernetes.io/projected/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-kube-api-access-5ncvr\") pod \"nova-cell0-conductor-db-sync-z4pzm\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.187702 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-z4pzm\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.188835 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-scripts\") pod \"nova-cell0-conductor-db-sync-z4pzm\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.188865 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-config-data\") pod \"nova-cell0-conductor-db-sync-z4pzm\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.197493 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-scripts\") pod \"nova-cell0-conductor-db-sync-z4pzm\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.203348 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-config-data\") pod \"nova-cell0-conductor-db-sync-z4pzm\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.205738 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-z4pzm\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.206708 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ncvr\" (UniqueName: \"kubernetes.io/projected/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-kube-api-access-5ncvr\") pod \"nova-cell0-conductor-db-sync-z4pzm\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.223443 4814 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.431171 4814 generic.go:334] "Generic (PLEG): container finished" podID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerID="21903c3524abde4754710b0c6f7107cea6a4b6f7465e6d4192bc84402d6ce4fc" exitCode=0 Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.431406 4814 generic.go:334] "Generic (PLEG): container finished" podID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerID="d26e1787b0504c211e6d7d060237afceff25a2fbd5971ff2e482e337eef36894" exitCode=2 Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.431415 4814 generic.go:334] "Generic (PLEG): container finished" podID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerID="c20b389415daba5f9f0780d02636d3f144e158feddca25f5171767535ab1943f" exitCode=0 Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.431456 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0dee27-7ce9-4f7a-9e69-d3da832700c0","Type":"ContainerDied","Data":"21903c3524abde4754710b0c6f7107cea6a4b6f7465e6d4192bc84402d6ce4fc"} Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.431504 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0dee27-7ce9-4f7a-9e69-d3da832700c0","Type":"ContainerDied","Data":"d26e1787b0504c211e6d7d060237afceff25a2fbd5971ff2e482e337eef36894"} Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.431517 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0dee27-7ce9-4f7a-9e69-d3da832700c0","Type":"ContainerDied","Data":"c20b389415daba5f9f0780d02636d3f144e158feddca25f5171767535ab1943f"} Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.444437 4814 scope.go:117] "RemoveContainer" containerID="9eb21294d559f752ffd90713b3fe430955cfcd6b1cf8d0b8b73fee6fc3c2d98e" Jan 22 05:37:51 crc kubenswrapper[4814]: E0122 05:37:51.444728 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-767ffccf56-cjdh4_openstack(09049d3c-1578-479a-b0e4-c853df37c918)\"" pod="openstack/heat-api-767ffccf56-cjdh4" podUID="09049d3c-1578-479a-b0e4-c853df37c918" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.453680 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"47eea733882c66d487823fb004595bb5b74593750bd6730a1b625e73c2be11e0"} Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.784965 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.870731 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-z4pzm"] Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.949880 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.950529 4814 scope.go:117] "RemoveContainer" containerID="feb7f0e28d88e6c7394a02a9c395ca60f0949f65af49df72a68b661434bb3104" Jan 22 05:37:51 crc kubenswrapper[4814]: E0122 05:37:51.950802 4814 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-5cf6ffb976-669wk_openstack(0ea7d2e2-9d56-4f96-bb9a-646c1191e800)\"" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" podUID="0ea7d2e2-9d56-4f96-bb9a-646c1191e800" Jan 22 05:37:51 crc kubenswrapper[4814]: I0122 05:37:51.951089 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.097795 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.209554 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-combined-ca-bundle\") pod \"50923695-9bcc-49c5-844f-6275c99729e2\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.209598 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-horizon-secret-key\") pod \"50923695-9bcc-49c5-844f-6275c99729e2\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.209658 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2jxg\" (UniqueName: \"kubernetes.io/projected/50923695-9bcc-49c5-844f-6275c99729e2-kube-api-access-h2jxg\") pod \"50923695-9bcc-49c5-844f-6275c99729e2\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.209733 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50923695-9bcc-49c5-844f-6275c99729e2-logs\") pod \"50923695-9bcc-49c5-844f-6275c99729e2\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.209812 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/50923695-9bcc-49c5-844f-6275c99729e2-config-data\") pod \"50923695-9bcc-49c5-844f-6275c99729e2\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.209837 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-horizon-tls-certs\") pod \"50923695-9bcc-49c5-844f-6275c99729e2\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.209919 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/50923695-9bcc-49c5-844f-6275c99729e2-scripts\") pod \"50923695-9bcc-49c5-844f-6275c99729e2\" (UID: \"50923695-9bcc-49c5-844f-6275c99729e2\") " Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.211745 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50923695-9bcc-49c5-844f-6275c99729e2-logs" (OuterVolumeSpecName: "logs") pod "50923695-9bcc-49c5-844f-6275c99729e2" (UID: "50923695-9bcc-49c5-844f-6275c99729e2"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.215586 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "50923695-9bcc-49c5-844f-6275c99729e2" (UID: "50923695-9bcc-49c5-844f-6275c99729e2"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.220174 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50923695-9bcc-49c5-844f-6275c99729e2-kube-api-access-h2jxg" (OuterVolumeSpecName: "kube-api-access-h2jxg") pod "50923695-9bcc-49c5-844f-6275c99729e2" (UID: "50923695-9bcc-49c5-844f-6275c99729e2"). InnerVolumeSpecName "kube-api-access-h2jxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.234151 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50923695-9bcc-49c5-844f-6275c99729e2-scripts" (OuterVolumeSpecName: "scripts") pod "50923695-9bcc-49c5-844f-6275c99729e2" (UID: "50923695-9bcc-49c5-844f-6275c99729e2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.245250 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "50923695-9bcc-49c5-844f-6275c99729e2" (UID: "50923695-9bcc-49c5-844f-6275c99729e2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.247401 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50923695-9bcc-49c5-844f-6275c99729e2-config-data" (OuterVolumeSpecName: "config-data") pod "50923695-9bcc-49c5-844f-6275c99729e2" (UID: "50923695-9bcc-49c5-844f-6275c99729e2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.277262 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "50923695-9bcc-49c5-844f-6275c99729e2" (UID: "50923695-9bcc-49c5-844f-6275c99729e2"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.311682 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/50923695-9bcc-49c5-844f-6275c99729e2-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.311708 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.311721 4814 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.311731 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2jxg\" (UniqueName: \"kubernetes.io/projected/50923695-9bcc-49c5-844f-6275c99729e2-kube-api-access-h2jxg\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.311740 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/50923695-9bcc-49c5-844f-6275c99729e2-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.311751 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/50923695-9bcc-49c5-844f-6275c99729e2-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.311761 4814 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/50923695-9bcc-49c5-844f-6275c99729e2-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.352682 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c33be205-b621-4a36-8cd8-2e30db89269c" path="/var/lib/kubelet/pods/c33be205-b621-4a36-8cd8-2e30db89269c/volumes" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.463999 4814 generic.go:334] "Generic (PLEG): container finished" podID="50923695-9bcc-49c5-844f-6275c99729e2" containerID="7f23a639b1ada62582270bf6799256509c66f1bf11e5ba263f91e0db7ea739ff" exitCode=137 Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.464081 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fc797bd5d-f6wlm" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.464102 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fc797bd5d-f6wlm" event={"ID":"50923695-9bcc-49c5-844f-6275c99729e2","Type":"ContainerDied","Data":"7f23a639b1ada62582270bf6799256509c66f1bf11e5ba263f91e0db7ea739ff"} Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.465911 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fc797bd5d-f6wlm" event={"ID":"50923695-9bcc-49c5-844f-6275c99729e2","Type":"ContainerDied","Data":"e0b158fa80cdab1ce98078e95ef9d217a7d096649738a7efb444d15b3c9a5bb3"} Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.465990 4814 scope.go:117] "RemoveContainer" containerID="9b9f397b2ef25ac63c69976a1ea84fba0814b89575854dd1327004c4934916e1" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.468769 4814 scope.go:117] "RemoveContainer" containerID="9eb21294d559f752ffd90713b3fe430955cfcd6b1cf8d0b8b73fee6fc3c2d98e" Jan 22 05:37:52 crc kubenswrapper[4814]: E0122 05:37:52.469032 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-767ffccf56-cjdh4_openstack(09049d3c-1578-479a-b0e4-c853df37c918)\"" pod="openstack/heat-api-767ffccf56-cjdh4" podUID="09049d3c-1578-479a-b0e4-c853df37c918" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.469322 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-z4pzm" event={"ID":"f1fd8e88-42ab-43bb-8697-c7aebb8fec34","Type":"ContainerStarted","Data":"a57d7cf5e981fbba257a45eaac39e836cfe4ff0079bd5e7eef00c11a13f527c8"} Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.469666 4814 scope.go:117] "RemoveContainer" containerID="feb7f0e28d88e6c7394a02a9c395ca60f0949f65af49df72a68b661434bb3104" Jan 22 05:37:52 crc kubenswrapper[4814]: E0122 05:37:52.469902 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-5cf6ffb976-669wk_openstack(0ea7d2e2-9d56-4f96-bb9a-646c1191e800)\"" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" podUID="0ea7d2e2-9d56-4f96-bb9a-646c1191e800" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.487101 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fc797bd5d-f6wlm"] Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.503233 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5fc797bd5d-f6wlm"] Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.673621 4814 scope.go:117] "RemoveContainer" containerID="7f23a639b1ada62582270bf6799256509c66f1bf11e5ba263f91e0db7ea739ff" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.751516 4814 scope.go:117] "RemoveContainer" containerID="9b9f397b2ef25ac63c69976a1ea84fba0814b89575854dd1327004c4934916e1" Jan 22 05:37:52 crc kubenswrapper[4814]: E0122 05:37:52.752295 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b9f397b2ef25ac63c69976a1ea84fba0814b89575854dd1327004c4934916e1\": container with ID starting with 9b9f397b2ef25ac63c69976a1ea84fba0814b89575854dd1327004c4934916e1 not found: ID does not exist" containerID="9b9f397b2ef25ac63c69976a1ea84fba0814b89575854dd1327004c4934916e1" Jan 22 05:37:52 crc 
kubenswrapper[4814]: I0122 05:37:52.752343 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b9f397b2ef25ac63c69976a1ea84fba0814b89575854dd1327004c4934916e1"} err="failed to get container status \"9b9f397b2ef25ac63c69976a1ea84fba0814b89575854dd1327004c4934916e1\": rpc error: code = NotFound desc = could not find container \"9b9f397b2ef25ac63c69976a1ea84fba0814b89575854dd1327004c4934916e1\": container with ID starting with 9b9f397b2ef25ac63c69976a1ea84fba0814b89575854dd1327004c4934916e1 not found: ID does not exist" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.752371 4814 scope.go:117] "RemoveContainer" containerID="7f23a639b1ada62582270bf6799256509c66f1bf11e5ba263f91e0db7ea739ff" Jan 22 05:37:52 crc kubenswrapper[4814]: E0122 05:37:52.752923 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f23a639b1ada62582270bf6799256509c66f1bf11e5ba263f91e0db7ea739ff\": container with ID starting with 7f23a639b1ada62582270bf6799256509c66f1bf11e5ba263f91e0db7ea739ff not found: ID does not exist" containerID="7f23a639b1ada62582270bf6799256509c66f1bf11e5ba263f91e0db7ea739ff" Jan 22 05:37:52 crc kubenswrapper[4814]: I0122 05:37:52.752944 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f23a639b1ada62582270bf6799256509c66f1bf11e5ba263f91e0db7ea739ff"} err="failed to get container status \"7f23a639b1ada62582270bf6799256509c66f1bf11e5ba263f91e0db7ea739ff\": rpc error: code = NotFound desc = could not find container \"7f23a639b1ada62582270bf6799256509c66f1bf11e5ba263f91e0db7ea739ff\": container with ID starting with 7f23a639b1ada62582270bf6799256509c66f1bf11e5ba263f91e0db7ea739ff not found: ID does not exist" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.196009 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.331296 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-run-httpd\") pod \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.331353 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-config-data\") pod \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.331452 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-log-httpd\") pod \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.331470 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-combined-ca-bundle\") pod \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.331571 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-scripts\") pod \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.331614 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-sg-core-conf-yaml\") pod \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.331648 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzg2f\" (UniqueName: \"kubernetes.io/projected/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-kube-api-access-gzg2f\") pod \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\" (UID: \"7f0dee27-7ce9-4f7a-9e69-d3da832700c0\") " Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.332185 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7f0dee27-7ce9-4f7a-9e69-d3da832700c0" (UID: "7f0dee27-7ce9-4f7a-9e69-d3da832700c0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.335063 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7f0dee27-7ce9-4f7a-9e69-d3da832700c0" (UID: "7f0dee27-7ce9-4f7a-9e69-d3da832700c0"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.363552 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-scripts" (OuterVolumeSpecName: "scripts") pod "7f0dee27-7ce9-4f7a-9e69-d3da832700c0" (UID: "7f0dee27-7ce9-4f7a-9e69-d3da832700c0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.367842 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-kube-api-access-gzg2f" (OuterVolumeSpecName: "kube-api-access-gzg2f") pod "7f0dee27-7ce9-4f7a-9e69-d3da832700c0" (UID: "7f0dee27-7ce9-4f7a-9e69-d3da832700c0"). InnerVolumeSpecName "kube-api-access-gzg2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.393201 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7f0dee27-7ce9-4f7a-9e69-d3da832700c0" (UID: "7f0dee27-7ce9-4f7a-9e69-d3da832700c0"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.436349 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.436376 4814 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.436388 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzg2f\" (UniqueName: \"kubernetes.io/projected/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-kube-api-access-gzg2f\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.436397 4814 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.436405 4814 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.459709 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-config-data" (OuterVolumeSpecName: "config-data") pod "7f0dee27-7ce9-4f7a-9e69-d3da832700c0" (UID: "7f0dee27-7ce9-4f7a-9e69-d3da832700c0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.467755 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7f0dee27-7ce9-4f7a-9e69-d3da832700c0" (UID: "7f0dee27-7ce9-4f7a-9e69-d3da832700c0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.493358 4814 generic.go:334] "Generic (PLEG): container finished" podID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerID="a5253e7117ef8f392925ced945614d7132785d1e9916df8502dfc549e301299b" exitCode=0 Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.493423 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0dee27-7ce9-4f7a-9e69-d3da832700c0","Type":"ContainerDied","Data":"a5253e7117ef8f392925ced945614d7132785d1e9916df8502dfc549e301299b"} Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.493453 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0dee27-7ce9-4f7a-9e69-d3da832700c0","Type":"ContainerDied","Data":"db4a93f9650008bc14c06791ddf9ef0ba4894fc895c5ec6021c2b02d9f8c40fc"} Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.493486 4814 scope.go:117] "RemoveContainer" containerID="21903c3524abde4754710b0c6f7107cea6a4b6f7465e6d4192bc84402d6ce4fc" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.493690 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.552360 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.553479 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.553494 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0dee27-7ce9-4f7a-9e69-d3da832700c0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.560524 4814 scope.go:117] "RemoveContainer" containerID="d26e1787b0504c211e6d7d060237afceff25a2fbd5971ff2e482e337eef36894" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.564711 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.576697 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:53 crc kubenswrapper[4814]: E0122 05:37:53.577727 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="sg-core" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.577767 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="sg-core" Jan 22 05:37:53 crc kubenswrapper[4814]: E0122 05:37:53.577786 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="proxy-httpd" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.577793 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="proxy-httpd" Jan 22 05:37:53 crc kubenswrapper[4814]: E0122 05:37:53.577818 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="ceilometer-notification-agent" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.577826 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" 
containerName="ceilometer-notification-agent" Jan 22 05:37:53 crc kubenswrapper[4814]: E0122 05:37:53.577842 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.577851 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon" Jan 22 05:37:53 crc kubenswrapper[4814]: E0122 05:37:53.577879 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon-log" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.577900 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon-log" Jan 22 05:37:53 crc kubenswrapper[4814]: E0122 05:37:53.577912 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="ceilometer-central-agent" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.577919 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="ceilometer-central-agent" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.578126 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon-log" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.578145 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="proxy-httpd" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.578168 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="sg-core" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.578186 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="ceilometer-notification-agent" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.578197 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="50923695-9bcc-49c5-844f-6275c99729e2" containerName="horizon" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.578209 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" containerName="ceilometer-central-agent" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.580340 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.582057 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.582222 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.593088 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.614203 4814 scope.go:117] "RemoveContainer" containerID="c20b389415daba5f9f0780d02636d3f144e158feddca25f5171767535ab1943f" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.656711 4814 scope.go:117] "RemoveContainer" containerID="a5253e7117ef8f392925ced945614d7132785d1e9916df8502dfc549e301299b" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.687766 4814 scope.go:117] "RemoveContainer" containerID="21903c3524abde4754710b0c6f7107cea6a4b6f7465e6d4192bc84402d6ce4fc" Jan 22 05:37:53 crc kubenswrapper[4814]: E0122 05:37:53.688723 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21903c3524abde4754710b0c6f7107cea6a4b6f7465e6d4192bc84402d6ce4fc\": container with ID starting with 21903c3524abde4754710b0c6f7107cea6a4b6f7465e6d4192bc84402d6ce4fc not found: ID does not exist" containerID="21903c3524abde4754710b0c6f7107cea6a4b6f7465e6d4192bc84402d6ce4fc" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.688754 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21903c3524abde4754710b0c6f7107cea6a4b6f7465e6d4192bc84402d6ce4fc"} err="failed to get container status \"21903c3524abde4754710b0c6f7107cea6a4b6f7465e6d4192bc84402d6ce4fc\": rpc error: code = NotFound desc = could not find container \"21903c3524abde4754710b0c6f7107cea6a4b6f7465e6d4192bc84402d6ce4fc\": container with ID starting with 21903c3524abde4754710b0c6f7107cea6a4b6f7465e6d4192bc84402d6ce4fc not found: ID does not exist" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.688777 4814 scope.go:117] "RemoveContainer" containerID="d26e1787b0504c211e6d7d060237afceff25a2fbd5971ff2e482e337eef36894" Jan 22 05:37:53 crc kubenswrapper[4814]: E0122 05:37:53.691211 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d26e1787b0504c211e6d7d060237afceff25a2fbd5971ff2e482e337eef36894\": container with ID starting with d26e1787b0504c211e6d7d060237afceff25a2fbd5971ff2e482e337eef36894 not found: ID does not exist" containerID="d26e1787b0504c211e6d7d060237afceff25a2fbd5971ff2e482e337eef36894" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.691232 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d26e1787b0504c211e6d7d060237afceff25a2fbd5971ff2e482e337eef36894"} err="failed to get container status \"d26e1787b0504c211e6d7d060237afceff25a2fbd5971ff2e482e337eef36894\": rpc error: code = NotFound desc = could not find container \"d26e1787b0504c211e6d7d060237afceff25a2fbd5971ff2e482e337eef36894\": container with ID starting with d26e1787b0504c211e6d7d060237afceff25a2fbd5971ff2e482e337eef36894 not found: ID does not exist" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.691247 4814 scope.go:117] "RemoveContainer" containerID="c20b389415daba5f9f0780d02636d3f144e158feddca25f5171767535ab1943f" Jan 22 
05:37:53 crc kubenswrapper[4814]: E0122 05:37:53.694671 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c20b389415daba5f9f0780d02636d3f144e158feddca25f5171767535ab1943f\": container with ID starting with c20b389415daba5f9f0780d02636d3f144e158feddca25f5171767535ab1943f not found: ID does not exist" containerID="c20b389415daba5f9f0780d02636d3f144e158feddca25f5171767535ab1943f" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.694694 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c20b389415daba5f9f0780d02636d3f144e158feddca25f5171767535ab1943f"} err="failed to get container status \"c20b389415daba5f9f0780d02636d3f144e158feddca25f5171767535ab1943f\": rpc error: code = NotFound desc = could not find container \"c20b389415daba5f9f0780d02636d3f144e158feddca25f5171767535ab1943f\": container with ID starting with c20b389415daba5f9f0780d02636d3f144e158feddca25f5171767535ab1943f not found: ID does not exist" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.694723 4814 scope.go:117] "RemoveContainer" containerID="a5253e7117ef8f392925ced945614d7132785d1e9916df8502dfc549e301299b" Jan 22 05:37:53 crc kubenswrapper[4814]: E0122 05:37:53.695183 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5253e7117ef8f392925ced945614d7132785d1e9916df8502dfc549e301299b\": container with ID starting with a5253e7117ef8f392925ced945614d7132785d1e9916df8502dfc549e301299b not found: ID does not exist" containerID="a5253e7117ef8f392925ced945614d7132785d1e9916df8502dfc549e301299b" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.695230 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5253e7117ef8f392925ced945614d7132785d1e9916df8502dfc549e301299b"} err="failed to get container status \"a5253e7117ef8f392925ced945614d7132785d1e9916df8502dfc549e301299b\": rpc error: code = NotFound desc = could not find container \"a5253e7117ef8f392925ced945614d7132785d1e9916df8502dfc549e301299b\": container with ID starting with a5253e7117ef8f392925ced945614d7132785d1e9916df8502dfc549e301299b not found: ID does not exist" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.757281 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.757320 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fa8f1c7b-eee8-4b63-9ca2-775837a71674-run-httpd\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.757420 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-scripts\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.757586 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.757692 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fa8f1c7b-eee8-4b63-9ca2-775837a71674-log-httpd\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.757754 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8s42\" (UniqueName: \"kubernetes.io/projected/fa8f1c7b-eee8-4b63-9ca2-775837a71674-kube-api-access-x8s42\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.757789 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-config-data\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.859412 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.859460 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fa8f1c7b-eee8-4b63-9ca2-775837a71674-log-httpd\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.859487 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8s42\" (UniqueName: \"kubernetes.io/projected/fa8f1c7b-eee8-4b63-9ca2-775837a71674-kube-api-access-x8s42\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.859503 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-config-data\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.859574 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.859597 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fa8f1c7b-eee8-4b63-9ca2-775837a71674-run-httpd\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.859648 4814 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-scripts\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.861319 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fa8f1c7b-eee8-4b63-9ca2-775837a71674-log-httpd\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.861605 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fa8f1c7b-eee8-4b63-9ca2-775837a71674-run-httpd\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.865270 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.865967 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.866130 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-scripts\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.868791 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-config-data\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.879534 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8s42\" (UniqueName: \"kubernetes.io/projected/fa8f1c7b-eee8-4b63-9ca2-775837a71674-kube-api-access-x8s42\") pod \"ceilometer-0\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " pod="openstack/ceilometer-0" Jan 22 05:37:53 crc kubenswrapper[4814]: I0122 05:37:53.900530 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:37:54 crc kubenswrapper[4814]: I0122 05:37:54.357857 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50923695-9bcc-49c5-844f-6275c99729e2" path="/var/lib/kubelet/pods/50923695-9bcc-49c5-844f-6275c99729e2/volumes" Jan 22 05:37:54 crc kubenswrapper[4814]: I0122 05:37:54.358687 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f0dee27-7ce9-4f7a-9e69-d3da832700c0" path="/var/lib/kubelet/pods/7f0dee27-7ce9-4f7a-9e69-d3da832700c0/volumes" Jan 22 05:37:54 crc kubenswrapper[4814]: I0122 05:37:54.390079 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:37:54 crc kubenswrapper[4814]: I0122 05:37:54.503855 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fa8f1c7b-eee8-4b63-9ca2-775837a71674","Type":"ContainerStarted","Data":"b58e74fd45148eba9f33677abcccf0de4a6b64b2b6e12e5af4808806770bbe38"} Jan 22 05:37:54 crc kubenswrapper[4814]: I0122 05:37:54.982517 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:37:55 crc kubenswrapper[4814]: I0122 05:37:55.232855 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:37:55 crc kubenswrapper[4814]: I0122 05:37:55.287754 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-5hll9"] Jan 22 05:37:55 crc kubenswrapper[4814]: I0122 05:37:55.288018 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5784cf869f-5hll9" podUID="b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" containerName="dnsmasq-dns" containerID="cri-o://78351aff5ec8b2eeca612e47d857edcbe95c2b889447a2e6d4317c59e9ccdfc1" gracePeriod=10 Jan 22 05:37:55 crc kubenswrapper[4814]: I0122 05:37:55.555185 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fa8f1c7b-eee8-4b63-9ca2-775837a71674","Type":"ContainerStarted","Data":"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48"} Jan 22 05:37:55 crc kubenswrapper[4814]: I0122 05:37:55.571916 4814 generic.go:334] "Generic (PLEG): container finished" podID="b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" containerID="78351aff5ec8b2eeca612e47d857edcbe95c2b889447a2e6d4317c59e9ccdfc1" exitCode=0 Jan 22 05:37:55 crc kubenswrapper[4814]: I0122 05:37:55.571969 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-5hll9" event={"ID":"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3","Type":"ContainerDied","Data":"78351aff5ec8b2eeca612e47d857edcbe95c2b889447a2e6d4317c59e9ccdfc1"} Jan 22 05:37:55 crc kubenswrapper[4814]: I0122 05:37:55.881879 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.044995 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-ovsdbserver-sb\") pod \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.045358 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-dns-svc\") pod \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.045415 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-dns-swift-storage-0\") pod \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.045570 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-config\") pod \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.045618 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-ovsdbserver-nb\") pod \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.045750 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljxts\" (UniqueName: \"kubernetes.io/projected/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-kube-api-access-ljxts\") pod \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\" (UID: \"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3\") " Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.060911 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-kube-api-access-ljxts" (OuterVolumeSpecName: "kube-api-access-ljxts") pod "b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" (UID: "b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3"). InnerVolumeSpecName "kube-api-access-ljxts". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.111390 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" (UID: "b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.116509 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" (UID: "b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.116695 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" (UID: "b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.128886 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-config" (OuterVolumeSpecName: "config") pod "b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" (UID: "b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.147673 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.147871 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.147934 4814 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.147997 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.148049 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljxts\" (UniqueName: \"kubernetes.io/projected/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-kube-api-access-ljxts\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.157239 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" (UID: "b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.250174 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.586802 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-5hll9" event={"ID":"b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3","Type":"ContainerDied","Data":"80c26bb38df97c736271b321344d52533134a706ddb1758285dedfbc683c6c2a"} Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.586852 4814 scope.go:117] "RemoveContainer" containerID="78351aff5ec8b2eeca612e47d857edcbe95c2b889447a2e6d4317c59e9ccdfc1" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.586983 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-5hll9" Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.616043 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-5hll9"] Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.630902 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-5hll9"] Jan 22 05:37:56 crc kubenswrapper[4814]: I0122 05:37:56.856054 4814 scope.go:117] "RemoveContainer" containerID="1f81f760a1a9e2fb90d74c94ea2f9cea2741a54d732c79c3e752fd2046cdb624" Jan 22 05:37:57 crc kubenswrapper[4814]: I0122 05:37:57.293057 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-74f5fd4998-wjl2m" Jan 22 05:37:57 crc kubenswrapper[4814]: I0122 05:37:57.354549 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-767ffccf56-cjdh4"] Jan 22 05:37:57 crc kubenswrapper[4814]: I0122 05:37:57.948817 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-d5868ff97-qmvxn" Jan 22 05:37:58 crc kubenswrapper[4814]: I0122 05:37:58.005182 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-5cf6ffb976-669wk"] Jan 22 05:37:58 crc kubenswrapper[4814]: I0122 05:37:58.355277 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" path="/var/lib/kubelet/pods/b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3/volumes" Jan 22 05:38:01 crc kubenswrapper[4814]: I0122 05:38:01.620132 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-7c6997bf6c-nnkzn" Jan 22 05:38:01 crc kubenswrapper[4814]: I0122 05:38:01.671978 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-78869465b8-8rvmm"] Jan 22 05:38:01 crc kubenswrapper[4814]: I0122 05:38:01.672190 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-78869465b8-8rvmm" podUID="497ad917-0e7c-41f5-ba64-29a3f5e71ca3" containerName="heat-engine" containerID="cri-o://021a4a3806642e2de283adf3f065c23c55c61b1c89bbc7d08a1dfe1839fd0386" gracePeriod=60 Jan 22 05:38:02 crc kubenswrapper[4814]: I0122 05:38:02.042687 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:02 crc kubenswrapper[4814]: I0122 05:38:02.150029 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 22 05:38:04 crc kubenswrapper[4814]: E0122 05:38:04.913868 4814 log.go:32] "ExecSync 
cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="021a4a3806642e2de283adf3f065c23c55c61b1c89bbc7d08a1dfe1839fd0386" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 22 05:38:04 crc kubenswrapper[4814]: E0122 05:38:04.915103 4814 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="021a4a3806642e2de283adf3f065c23c55c61b1c89bbc7d08a1dfe1839fd0386" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 22 05:38:04 crc kubenswrapper[4814]: E0122 05:38:04.916006 4814 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="021a4a3806642e2de283adf3f065c23c55c61b1c89bbc7d08a1dfe1839fd0386" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Jan 22 05:38:04 crc kubenswrapper[4814]: E0122 05:38:04.916030 4814 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-78869465b8-8rvmm" podUID="497ad917-0e7c-41f5-ba64-29a3f5e71ca3" containerName="heat-engine" Jan 22 05:38:07 crc kubenswrapper[4814]: E0122 05:38:07.282633 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified" Jan 22 05:38:07 crc kubenswrapper[4814]: E0122 05:38:07.283202 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:nova-cell0-conductor-db-sync,Image:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CELL_NAME,Value:cell0,ValueFrom:nil,},EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:false,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/kolla/config_files/config.json,SubPath:nova-conductor-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5ncvr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42436,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-cell0-conductor-db-sync-z4pzm_openstack(f1fd8e88-42ab-43bb-8697-c7aebb8fec34): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 05:38:07 crc kubenswrapper[4814]: E0122 05:38:07.284499 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/nova-cell0-conductor-db-sync-z4pzm" podUID="f1fd8e88-42ab-43bb-8697-c7aebb8fec34" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.368805 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.377562 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.422247 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-config-data-custom\") pod \"09049d3c-1578-479a-b0e4-c853df37c918\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.422329 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fvcd\" (UniqueName: \"kubernetes.io/projected/09049d3c-1578-479a-b0e4-c853df37c918-kube-api-access-9fvcd\") pod \"09049d3c-1578-479a-b0e4-c853df37c918\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.422368 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-combined-ca-bundle\") pod \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.422410 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfbnc\" (UniqueName: \"kubernetes.io/projected/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-kube-api-access-zfbnc\") pod \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.422445 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-config-data\") pod \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.422502 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-config-data\") pod \"09049d3c-1578-479a-b0e4-c853df37c918\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.422530 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-config-data-custom\") pod \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\" (UID: \"0ea7d2e2-9d56-4f96-bb9a-646c1191e800\") " Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.422599 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-combined-ca-bundle\") pod \"09049d3c-1578-479a-b0e4-c853df37c918\" (UID: \"09049d3c-1578-479a-b0e4-c853df37c918\") " Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.452094 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "09049d3c-1578-479a-b0e4-c853df37c918" (UID: "09049d3c-1578-479a-b0e4-c853df37c918"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.459326 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-kube-api-access-zfbnc" (OuterVolumeSpecName: "kube-api-access-zfbnc") pod "0ea7d2e2-9d56-4f96-bb9a-646c1191e800" (UID: "0ea7d2e2-9d56-4f96-bb9a-646c1191e800"). InnerVolumeSpecName "kube-api-access-zfbnc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.460101 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0ea7d2e2-9d56-4f96-bb9a-646c1191e800" (UID: "0ea7d2e2-9d56-4f96-bb9a-646c1191e800"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.460482 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09049d3c-1578-479a-b0e4-c853df37c918-kube-api-access-9fvcd" (OuterVolumeSpecName: "kube-api-access-9fvcd") pod "09049d3c-1578-479a-b0e4-c853df37c918" (UID: "09049d3c-1578-479a-b0e4-c853df37c918"). InnerVolumeSpecName "kube-api-access-9fvcd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.515018 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0ea7d2e2-9d56-4f96-bb9a-646c1191e800" (UID: "0ea7d2e2-9d56-4f96-bb9a-646c1191e800"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.524223 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfbnc\" (UniqueName: \"kubernetes.io/projected/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-kube-api-access-zfbnc\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.524320 4814 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.524381 4814 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.524440 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fvcd\" (UniqueName: \"kubernetes.io/projected/09049d3c-1578-479a-b0e4-c853df37c918-kube-api-access-9fvcd\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.524495 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.546753 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-config-data" (OuterVolumeSpecName: "config-data") pod "09049d3c-1578-479a-b0e4-c853df37c918" (UID: "09049d3c-1578-479a-b0e4-c853df37c918"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.563770 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09049d3c-1578-479a-b0e4-c853df37c918" (UID: "09049d3c-1578-479a-b0e4-c853df37c918"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.567637 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-config-data" (OuterVolumeSpecName: "config-data") pod "0ea7d2e2-9d56-4f96-bb9a-646c1191e800" (UID: "0ea7d2e2-9d56-4f96-bb9a-646c1191e800"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.625666 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.625699 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09049d3c-1578-479a-b0e4-c853df37c918-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.625710 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ea7d2e2-9d56-4f96-bb9a-646c1191e800-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.710331 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" event={"ID":"0ea7d2e2-9d56-4f96-bb9a-646c1191e800","Type":"ContainerDied","Data":"7e17c850414cc4bc74d2582bac391f7c21319eba271c5cd15a70abac6f9d0a0a"} Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.710413 4814 scope.go:117] "RemoveContainer" containerID="feb7f0e28d88e6c7394a02a9c395ca60f0949f65af49df72a68b661434bb3104" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.710651 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5cf6ffb976-669wk" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.712987 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fa8f1c7b-eee8-4b63-9ca2-775837a71674","Type":"ContainerStarted","Data":"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460"} Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.719839 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-767ffccf56-cjdh4" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.721767 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-767ffccf56-cjdh4" event={"ID":"09049d3c-1578-479a-b0e4-c853df37c918","Type":"ContainerDied","Data":"15cc7e2ecc2fd4f8e3b395de55cf251c9f7aec819224dfc0fa6d526b3f38f3c1"} Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.734049 4814 scope.go:117] "RemoveContainer" containerID="9eb21294d559f752ffd90713b3fe430955cfcd6b1cf8d0b8b73fee6fc3c2d98e" Jan 22 05:38:07 crc kubenswrapper[4814]: E0122 05:38:07.734098 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified\\\"\"" pod="openstack/nova-cell0-conductor-db-sync-z4pzm" podUID="f1fd8e88-42ab-43bb-8697-c7aebb8fec34" Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.805721 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-5cf6ffb976-669wk"] Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.813463 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-5cf6ffb976-669wk"] Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.822331 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-767ffccf56-cjdh4"] Jan 22 05:38:07 crc kubenswrapper[4814]: I0122 05:38:07.829013 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-767ffccf56-cjdh4"] Jan 22 05:38:08 crc kubenswrapper[4814]: I0122 05:38:08.352694 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09049d3c-1578-479a-b0e4-c853df37c918" path="/var/lib/kubelet/pods/09049d3c-1578-479a-b0e4-c853df37c918/volumes" Jan 22 05:38:08 crc kubenswrapper[4814]: I0122 05:38:08.353478 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ea7d2e2-9d56-4f96-bb9a-646c1191e800" path="/var/lib/kubelet/pods/0ea7d2e2-9d56-4f96-bb9a-646c1191e800/volumes" Jan 22 05:38:08 crc kubenswrapper[4814]: I0122 05:38:08.732149 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fa8f1c7b-eee8-4b63-9ca2-775837a71674","Type":"ContainerStarted","Data":"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0"} Jan 22 05:38:09 crc kubenswrapper[4814]: I0122 05:38:09.746468 4814 generic.go:334] "Generic (PLEG): container finished" podID="497ad917-0e7c-41f5-ba64-29a3f5e71ca3" containerID="021a4a3806642e2de283adf3f065c23c55c61b1c89bbc7d08a1dfe1839fd0386" exitCode=0 Jan 22 05:38:09 crc kubenswrapper[4814]: I0122 05:38:09.747104 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-78869465b8-8rvmm" event={"ID":"497ad917-0e7c-41f5-ba64-29a3f5e71ca3","Type":"ContainerDied","Data":"021a4a3806642e2de283adf3f065c23c55c61b1c89bbc7d08a1dfe1839fd0386"} Jan 22 05:38:09 crc kubenswrapper[4814]: I0122 05:38:09.749465 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fa8f1c7b-eee8-4b63-9ca2-775837a71674","Type":"ContainerStarted","Data":"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9"} Jan 22 05:38:09 crc kubenswrapper[4814]: I0122 05:38:09.749688 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="ceilometer-central-agent" 
containerID="cri-o://8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48" gracePeriod=30 Jan 22 05:38:09 crc kubenswrapper[4814]: I0122 05:38:09.749788 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 05:38:09 crc kubenswrapper[4814]: I0122 05:38:09.752860 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="proxy-httpd" containerID="cri-o://ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9" gracePeriod=30 Jan 22 05:38:09 crc kubenswrapper[4814]: I0122 05:38:09.753274 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="sg-core" containerID="cri-o://d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0" gracePeriod=30 Jan 22 05:38:09 crc kubenswrapper[4814]: I0122 05:38:09.753312 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="ceilometer-notification-agent" containerID="cri-o://65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460" gracePeriod=30 Jan 22 05:38:09 crc kubenswrapper[4814]: I0122 05:38:09.778838 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.119291811 podStartE2EDuration="16.778817246s" podCreationTimestamp="2026-01-22 05:37:53 +0000 UTC" firstStartedPulling="2026-01-22 05:37:54.406055853 +0000 UTC m=+1160.489544068" lastFinishedPulling="2026-01-22 05:38:09.065581288 +0000 UTC m=+1175.149069503" observedRunningTime="2026-01-22 05:38:09.773016354 +0000 UTC m=+1175.856504569" watchObservedRunningTime="2026-01-22 05:38:09.778817246 +0000 UTC m=+1175.862305461" Jan 22 05:38:09 crc kubenswrapper[4814]: I0122 05:38:09.936796 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.083719 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-combined-ca-bundle\") pod \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.083802 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-config-data\") pod \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.083829 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-448m5\" (UniqueName: \"kubernetes.io/projected/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-kube-api-access-448m5\") pod \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.084011 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-config-data-custom\") pod \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\" (UID: \"497ad917-0e7c-41f5-ba64-29a3f5e71ca3\") " Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.091800 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "497ad917-0e7c-41f5-ba64-29a3f5e71ca3" (UID: "497ad917-0e7c-41f5-ba64-29a3f5e71ca3"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.095992 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-kube-api-access-448m5" (OuterVolumeSpecName: "kube-api-access-448m5") pod "497ad917-0e7c-41f5-ba64-29a3f5e71ca3" (UID: "497ad917-0e7c-41f5-ba64-29a3f5e71ca3"). InnerVolumeSpecName "kube-api-access-448m5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.119847 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "497ad917-0e7c-41f5-ba64-29a3f5e71ca3" (UID: "497ad917-0e7c-41f5-ba64-29a3f5e71ca3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.186192 4814 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.186220 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.186230 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-448m5\" (UniqueName: \"kubernetes.io/projected/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-kube-api-access-448m5\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.186312 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-config-data" (OuterVolumeSpecName: "config-data") pod "497ad917-0e7c-41f5-ba64-29a3f5e71ca3" (UID: "497ad917-0e7c-41f5-ba64-29a3f5e71ca3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.287683 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/497ad917-0e7c-41f5-ba64-29a3f5e71ca3-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.651314 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.757070 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-78869465b8-8rvmm" event={"ID":"497ad917-0e7c-41f5-ba64-29a3f5e71ca3","Type":"ContainerDied","Data":"a805cf5cb7ed60e2754757ce1938822c2ff601297a237af5315d6ebf89ea1123"} Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.757117 4814 scope.go:117] "RemoveContainer" containerID="021a4a3806642e2de283adf3f065c23c55c61b1c89bbc7d08a1dfe1839fd0386" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.757242 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-78869465b8-8rvmm" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.760820 4814 generic.go:334] "Generic (PLEG): container finished" podID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerID="ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9" exitCode=0 Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.760842 4814 generic.go:334] "Generic (PLEG): container finished" podID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerID="d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0" exitCode=2 Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.760850 4814 generic.go:334] "Generic (PLEG): container finished" podID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerID="65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460" exitCode=0 Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.760857 4814 generic.go:334] "Generic (PLEG): container finished" podID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerID="8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48" exitCode=0 Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.760871 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fa8f1c7b-eee8-4b63-9ca2-775837a71674","Type":"ContainerDied","Data":"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9"} Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.760888 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fa8f1c7b-eee8-4b63-9ca2-775837a71674","Type":"ContainerDied","Data":"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0"} Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.760899 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fa8f1c7b-eee8-4b63-9ca2-775837a71674","Type":"ContainerDied","Data":"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460"} Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.760907 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fa8f1c7b-eee8-4b63-9ca2-775837a71674","Type":"ContainerDied","Data":"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48"} Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.760915 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fa8f1c7b-eee8-4b63-9ca2-775837a71674","Type":"ContainerDied","Data":"b58e74fd45148eba9f33677abcccf0de4a6b64b2b6e12e5af4808806770bbe38"} Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.760960 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.779705 4814 scope.go:117] "RemoveContainer" containerID="ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.783548 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-78869465b8-8rvmm"] Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.796664 4814 scope.go:117] "RemoveContainer" containerID="d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.801690 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-78869465b8-8rvmm"] Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.810199 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-combined-ca-bundle\") pod \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.810230 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-sg-core-conf-yaml\") pod \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.810329 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-scripts\") pod \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.810365 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8s42\" (UniqueName: \"kubernetes.io/projected/fa8f1c7b-eee8-4b63-9ca2-775837a71674-kube-api-access-x8s42\") pod \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.810389 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fa8f1c7b-eee8-4b63-9ca2-775837a71674-run-httpd\") pod \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.810415 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fa8f1c7b-eee8-4b63-9ca2-775837a71674-log-httpd\") pod \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.810566 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-config-data\") pod \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\" (UID: \"fa8f1c7b-eee8-4b63-9ca2-775837a71674\") " Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.813196 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa8f1c7b-eee8-4b63-9ca2-775837a71674-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "fa8f1c7b-eee8-4b63-9ca2-775837a71674" (UID: "fa8f1c7b-eee8-4b63-9ca2-775837a71674"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.813345 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa8f1c7b-eee8-4b63-9ca2-775837a71674-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "fa8f1c7b-eee8-4b63-9ca2-775837a71674" (UID: "fa8f1c7b-eee8-4b63-9ca2-775837a71674"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.819062 4814 scope.go:117] "RemoveContainer" containerID="65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.820796 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-scripts" (OuterVolumeSpecName: "scripts") pod "fa8f1c7b-eee8-4b63-9ca2-775837a71674" (UID: "fa8f1c7b-eee8-4b63-9ca2-775837a71674"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.821919 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa8f1c7b-eee8-4b63-9ca2-775837a71674-kube-api-access-x8s42" (OuterVolumeSpecName: "kube-api-access-x8s42") pod "fa8f1c7b-eee8-4b63-9ca2-775837a71674" (UID: "fa8f1c7b-eee8-4b63-9ca2-775837a71674"). InnerVolumeSpecName "kube-api-access-x8s42". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.868297 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "fa8f1c7b-eee8-4b63-9ca2-775837a71674" (UID: "fa8f1c7b-eee8-4b63-9ca2-775837a71674"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.913311 4814 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.913340 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.913350 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8s42\" (UniqueName: \"kubernetes.io/projected/fa8f1c7b-eee8-4b63-9ca2-775837a71674-kube-api-access-x8s42\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.913361 4814 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fa8f1c7b-eee8-4b63-9ca2-775837a71674-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.913370 4814 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fa8f1c7b-eee8-4b63-9ca2-775837a71674-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.929807 4814 scope.go:117] "RemoveContainer" containerID="8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.932926 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fa8f1c7b-eee8-4b63-9ca2-775837a71674" (UID: "fa8f1c7b-eee8-4b63-9ca2-775837a71674"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.952775 4814 scope.go:117] "RemoveContainer" containerID="ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9" Jan 22 05:38:10 crc kubenswrapper[4814]: E0122 05:38:10.953171 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9\": container with ID starting with ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9 not found: ID does not exist" containerID="ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.953206 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9"} err="failed to get container status \"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9\": rpc error: code = NotFound desc = could not find container \"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9\": container with ID starting with ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.953224 4814 scope.go:117] "RemoveContainer" containerID="d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0" Jan 22 05:38:10 crc kubenswrapper[4814]: E0122 05:38:10.953498 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0\": container with ID starting with d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0 not found: ID does not exist" containerID="d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.953518 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0"} err="failed to get container status \"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0\": rpc error: code = NotFound desc = could not find container \"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0\": container with ID starting with d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.953531 4814 scope.go:117] "RemoveContainer" containerID="65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460" Jan 22 05:38:10 crc kubenswrapper[4814]: E0122 05:38:10.957060 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460\": container with ID starting with 65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460 not found: ID does not exist" containerID="65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.957106 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460"} err="failed to get container status \"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460\": rpc error: code = NotFound desc = could not 
find container \"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460\": container with ID starting with 65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.957140 4814 scope.go:117] "RemoveContainer" containerID="8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48" Jan 22 05:38:10 crc kubenswrapper[4814]: E0122 05:38:10.957479 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48\": container with ID starting with 8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48 not found: ID does not exist" containerID="8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.957527 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48"} err="failed to get container status \"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48\": rpc error: code = NotFound desc = could not find container \"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48\": container with ID starting with 8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.957554 4814 scope.go:117] "RemoveContainer" containerID="ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.957848 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9"} err="failed to get container status \"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9\": rpc error: code = NotFound desc = could not find container \"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9\": container with ID starting with ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.957868 4814 scope.go:117] "RemoveContainer" containerID="d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.958176 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0"} err="failed to get container status \"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0\": rpc error: code = NotFound desc = could not find container \"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0\": container with ID starting with d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.958195 4814 scope.go:117] "RemoveContainer" containerID="65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.958374 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460"} err="failed to get container status \"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460\": rpc error: code = NotFound desc = could not 
find container \"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460\": container with ID starting with 65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.958390 4814 scope.go:117] "RemoveContainer" containerID="8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.958548 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48"} err="failed to get container status \"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48\": rpc error: code = NotFound desc = could not find container \"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48\": container with ID starting with 8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.958564 4814 scope.go:117] "RemoveContainer" containerID="ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.958830 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9"} err="failed to get container status \"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9\": rpc error: code = NotFound desc = could not find container \"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9\": container with ID starting with ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.958847 4814 scope.go:117] "RemoveContainer" containerID="d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.958987 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0"} err="failed to get container status \"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0\": rpc error: code = NotFound desc = could not find container \"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0\": container with ID starting with d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.959004 4814 scope.go:117] "RemoveContainer" containerID="65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.959172 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460"} err="failed to get container status \"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460\": rpc error: code = NotFound desc = could not find container \"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460\": container with ID starting with 65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.959193 4814 scope.go:117] "RemoveContainer" containerID="8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.959353 4814 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48"} err="failed to get container status \"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48\": rpc error: code = NotFound desc = could not find container \"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48\": container with ID starting with 8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.959369 4814 scope.go:117] "RemoveContainer" containerID="ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.959502 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9"} err="failed to get container status \"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9\": rpc error: code = NotFound desc = could not find container \"ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9\": container with ID starting with ba3a992cfe9d10ab0202ba9a6c0ae0214f96098e1cd0847f5c935a89401ed2f9 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.959518 4814 scope.go:117] "RemoveContainer" containerID="d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.959673 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0"} err="failed to get container status \"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0\": rpc error: code = NotFound desc = could not find container \"d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0\": container with ID starting with d0a178fbf440bdb795f7a5dea26640a6661b14e5833eb23bad03b95cc1cd7ab0 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.959689 4814 scope.go:117] "RemoveContainer" containerID="65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.959904 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460"} err="failed to get container status \"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460\": rpc error: code = NotFound desc = could not find container \"65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460\": container with ID starting with 65e9f6c34c259448796805ed94dc33f56a8efda94bc7a4c7ab636742e4a7f460 not found: ID does not exist" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.959926 4814 scope.go:117] "RemoveContainer" containerID="8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48" Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.960109 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48"} err="failed to get container status \"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48\": rpc error: code = NotFound desc = could not find container \"8f08035a218e0ddeaaea1b2861d85efb261d10db3de54d29410863c9bcb0ac48\": container with ID starting with 
Jan 22 05:38:10 crc kubenswrapper[4814]: I0122 05:38:10.969692 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-config-data" (OuterVolumeSpecName: "config-data") pod "fa8f1c7b-eee8-4b63-9ca2-775837a71674" (UID: "fa8f1c7b-eee8-4b63-9ca2-775837a71674"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.014052 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.014077 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa8f1c7b-eee8-4b63-9ca2-775837a71674-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.106640 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.116134 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.138395 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:38:11 crc kubenswrapper[4814]: E0122 05:38:11.138788 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ea7d2e2-9d56-4f96-bb9a-646c1191e800" containerName="heat-cfnapi"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.138804 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ea7d2e2-9d56-4f96-bb9a-646c1191e800" containerName="heat-cfnapi"
Jan 22 05:38:11 crc kubenswrapper[4814]: E0122 05:38:11.138817 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="ceilometer-notification-agent"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.138823 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="ceilometer-notification-agent"
Jan 22 05:38:11 crc kubenswrapper[4814]: E0122 05:38:11.138838 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09049d3c-1578-479a-b0e4-c853df37c918" containerName="heat-api"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.138844 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="09049d3c-1578-479a-b0e4-c853df37c918" containerName="heat-api"
Jan 22 05:38:11 crc kubenswrapper[4814]: E0122 05:38:11.138851 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="ceilometer-central-agent"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.138857 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="ceilometer-central-agent"
Jan 22 05:38:11 crc kubenswrapper[4814]: E0122 05:38:11.138869 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09049d3c-1578-479a-b0e4-c853df37c918" containerName="heat-api"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.138875 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="09049d3c-1578-479a-b0e4-c853df37c918" containerName="heat-api"
Jan 22 05:38:11 crc kubenswrapper[4814]: E0122 05:38:11.138892 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" containerName="init"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.138897 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" containerName="init"
Jan 22 05:38:11 crc kubenswrapper[4814]: E0122 05:38:11.138908 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" containerName="dnsmasq-dns"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.138913 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" containerName="dnsmasq-dns"
Jan 22 05:38:11 crc kubenswrapper[4814]: E0122 05:38:11.138922 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="497ad917-0e7c-41f5-ba64-29a3f5e71ca3" containerName="heat-engine"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.138927 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="497ad917-0e7c-41f5-ba64-29a3f5e71ca3" containerName="heat-engine"
Jan 22 05:38:11 crc kubenswrapper[4814]: E0122 05:38:11.138939 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="proxy-httpd"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.138945 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="proxy-httpd"
Jan 22 05:38:11 crc kubenswrapper[4814]: E0122 05:38:11.138955 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="sg-core"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.138960 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="sg-core"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.139117 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6a0f4df-e1f7-4fab-ae4d-b0ea5241fcf3" containerName="dnsmasq-dns"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.139133 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="sg-core"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.139147 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="ceilometer-central-agent"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.139160 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="09049d3c-1578-479a-b0e4-c853df37c918" containerName="heat-api"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.139171 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="proxy-httpd"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.139181 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" containerName="ceilometer-notification-agent"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.139190 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="497ad917-0e7c-41f5-ba64-29a3f5e71ca3" containerName="heat-engine"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.139197 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ea7d2e2-9d56-4f96-bb9a-646c1191e800" containerName="heat-cfnapi"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.139207 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="09049d3c-1578-479a-b0e4-c853df37c918" containerName="heat-api"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.139216 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ea7d2e2-9d56-4f96-bb9a-646c1191e800" containerName="heat-cfnapi"
Jan 22 05:38:11 crc kubenswrapper[4814]: E0122 05:38:11.139369 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ea7d2e2-9d56-4f96-bb9a-646c1191e800" containerName="heat-cfnapi"
Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.139376 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ea7d2e2-9d56-4f96-bb9a-646c1191e800" containerName="heat-cfnapi"
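When ceilometer-0 is deleted and immediately re-added (SyncLoop DELETE/REMOVE at 05:38:11.106/116, ADD at 05:38:11.138), the CPU and memory managers first drop per-container resource state keyed by the old pod UIDs before admitting the replacement. A toy reduction of that bookkeeping, assuming a composite map key; these are not the kubelet's real state types:

```go
package main

import "fmt"

// key mirrors how per-container assignments are addressed in the log:
// pod UID plus container name.
type key struct {
	podUID        string
	containerName string
}

type staleStateCleaner struct {
	cpuSets map[key]string // e.g. "0-3"
}

// RemoveStaleState drops any assignment whose pod UID is no longer active.
func (c *staleStateCleaner) RemoveStaleState(activePods map[string]bool) {
	for k := range c.cpuSets {
		if !activePods[k.podUID] {
			fmt.Printf("removing stale CPUSet for pod %s container %s\n", k.podUID, k.containerName)
			delete(c.cpuSets, k)
		}
	}
}

func main() {
	c := &staleStateCleaner{cpuSets: map[key]string{
		{podUID: "fa8f1c7b-eee8-4b63-9ca2-775837a71674", containerName: "sg-core"}: "0-3",
	}}
	// The old ceilometer-0 UID is gone; only the replacement pod is active.
	c.RemoveStaleState(map[string]bool{"4320404e-73ff-4bb0-8d9c-7a3d49755fab": true})
}
```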
\"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.217543 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-config-data\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.318611 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-scripts\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.318701 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6wqr\" (UniqueName: \"kubernetes.io/projected/4320404e-73ff-4bb0-8d9c-7a3d49755fab-kube-api-access-b6wqr\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.318719 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-config-data\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.318744 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4320404e-73ff-4bb0-8d9c-7a3d49755fab-log-httpd\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.318758 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.318783 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.318828 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4320404e-73ff-4bb0-8d9c-7a3d49755fab-run-httpd\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.319246 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4320404e-73ff-4bb0-8d9c-7a3d49755fab-run-httpd\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.319331 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4320404e-73ff-4bb0-8d9c-7a3d49755fab-log-httpd\") pod \"ceilometer-0\" (UID: 
\"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.322601 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.323172 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-scripts\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.323793 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-config-data\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.326256 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.338278 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6wqr\" (UniqueName: \"kubernetes.io/projected/4320404e-73ff-4bb0-8d9c-7a3d49755fab-kube-api-access-b6wqr\") pod \"ceilometer-0\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " pod="openstack/ceilometer-0" Jan 22 05:38:11 crc kubenswrapper[4814]: I0122 05:38:11.455415 4814 util.go:30] "No sandbox for pod can be found. 
Jan 22 05:38:12 crc kubenswrapper[4814]: I0122 05:38:12.016037 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:38:12 crc kubenswrapper[4814]: W0122 05:38:12.025288 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4320404e_73ff_4bb0_8d9c_7a3d49755fab.slice/crio-749837654b7819fe05d28d98994df5b6cced70c4b64e1fb5b22763f8e5153e28 WatchSource:0}: Error finding container 749837654b7819fe05d28d98994df5b6cced70c4b64e1fb5b22763f8e5153e28: Status 404 returned error can't find the container with id 749837654b7819fe05d28d98994df5b6cced70c4b64e1fb5b22763f8e5153e28
Jan 22 05:38:12 crc kubenswrapper[4814]: I0122 05:38:12.352804 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="497ad917-0e7c-41f5-ba64-29a3f5e71ca3" path="/var/lib/kubelet/pods/497ad917-0e7c-41f5-ba64-29a3f5e71ca3/volumes"
Jan 22 05:38:12 crc kubenswrapper[4814]: I0122 05:38:12.353359 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa8f1c7b-eee8-4b63-9ca2-775837a71674" path="/var/lib/kubelet/pods/fa8f1c7b-eee8-4b63-9ca2-775837a71674/volumes"
Jan 22 05:38:12 crc kubenswrapper[4814]: I0122 05:38:12.786955 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4320404e-73ff-4bb0-8d9c-7a3d49755fab","Type":"ContainerStarted","Data":"018cbf4d21c0daec709601d8abb8addb536339766bc209b413b89f750e33dc2f"}
Jan 22 05:38:12 crc kubenswrapper[4814]: I0122 05:38:12.787212 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4320404e-73ff-4bb0-8d9c-7a3d49755fab","Type":"ContainerStarted","Data":"749837654b7819fe05d28d98994df5b6cced70c4b64e1fb5b22763f8e5153e28"}
Jan 22 05:38:13 crc kubenswrapper[4814]: I0122 05:38:13.191316 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:38:13 crc kubenswrapper[4814]: I0122 05:38:13.798434 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4320404e-73ff-4bb0-8d9c-7a3d49755fab","Type":"ContainerStarted","Data":"782a6d10f00aa687b252061946904aabde8a80565bc89d2277d36b81b40baa4f"}
Jan 22 05:38:14 crc kubenswrapper[4814]: I0122 05:38:14.816976 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4320404e-73ff-4bb0-8d9c-7a3d49755fab","Type":"ContainerStarted","Data":"09656491aa1e669f70e60fc363d9f4b07296cf82c06d2e67ad0af6027edfde39"}
Jan 22 05:38:15 crc kubenswrapper[4814]: I0122 05:38:15.222593 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 22 05:38:15 crc kubenswrapper[4814]: I0122 05:38:15.223281 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="3249a9bd-6017-4d3a-80df-4b34a69af9e9" containerName="glance-httpd" containerID="cri-o://4c89dd296fc00dba463e21e245a6e159ba4c521d268c8d11b56517760752df73" gracePeriod=30
Jan 22 05:38:15 crc kubenswrapper[4814]: I0122 05:38:15.223145 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="3249a9bd-6017-4d3a-80df-4b34a69af9e9" containerName="glance-log" containerID="cri-o://c94f497335b11dd5bab0f1cf53950ffbd676240caabc9e2e4ed2d7910569f89a" gracePeriod=30
Jan 22 05:38:15 crc kubenswrapper[4814]: I0122 05:38:15.828638 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4320404e-73ff-4bb0-8d9c-7a3d49755fab","Type":"ContainerStarted","Data":"3e7e5af767c984d3fdaf06779eb9462728040348569189fc51cd1706555d75d5"}
Jan 22 05:38:15 crc kubenswrapper[4814]: I0122 05:38:15.828819 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="ceilometer-central-agent" containerID="cri-o://018cbf4d21c0daec709601d8abb8addb536339766bc209b413b89f750e33dc2f" gracePeriod=30
Jan 22 05:38:15 crc kubenswrapper[4814]: I0122 05:38:15.829090 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 22 05:38:15 crc kubenswrapper[4814]: I0122 05:38:15.829369 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="proxy-httpd" containerID="cri-o://3e7e5af767c984d3fdaf06779eb9462728040348569189fc51cd1706555d75d5" gracePeriod=30
Jan 22 05:38:15 crc kubenswrapper[4814]: I0122 05:38:15.829429 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="sg-core" containerID="cri-o://09656491aa1e669f70e60fc363d9f4b07296cf82c06d2e67ad0af6027edfde39" gracePeriod=30
Jan 22 05:38:15 crc kubenswrapper[4814]: I0122 05:38:15.829474 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="ceilometer-notification-agent" containerID="cri-o://782a6d10f00aa687b252061946904aabde8a80565bc89d2277d36b81b40baa4f" gracePeriod=30
Jan 22 05:38:15 crc kubenswrapper[4814]: I0122 05:38:15.835958 4814 generic.go:334] "Generic (PLEG): container finished" podID="3249a9bd-6017-4d3a-80df-4b34a69af9e9" containerID="c94f497335b11dd5bab0f1cf53950ffbd676240caabc9e2e4ed2d7910569f89a" exitCode=143
Jan 22 05:38:15 crc kubenswrapper[4814]: I0122 05:38:15.835998 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3249a9bd-6017-4d3a-80df-4b34a69af9e9","Type":"ContainerDied","Data":"c94f497335b11dd5bab0f1cf53950ffbd676240caabc9e2e4ed2d7910569f89a"}
Jan 22 05:38:15 crc kubenswrapper[4814]: I0122 05:38:15.852160 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.923463696 podStartE2EDuration="4.852144235s" podCreationTimestamp="2026-01-22 05:38:11 +0000 UTC" firstStartedPulling="2026-01-22 05:38:12.027821572 +0000 UTC m=+1178.111309797" lastFinishedPulling="2026-01-22 05:38:14.956502121 +0000 UTC m=+1181.039990336" observedRunningTime="2026-01-22 05:38:15.849558804 +0000 UTC m=+1181.933047019" watchObservedRunningTime="2026-01-22 05:38:15.852144235 +0000 UTC m=+1181.935632450"
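The podStartSLOduration figure above is internally consistent: the end-to-end duration is watchObservedRunningTime minus podCreationTimestamp (05:38:15.852144235 - 05:38:11 = 4.852144235s), and the SLO duration is that E2E figure minus the image-pull window taken from the monotonic (m=+...) offsets: 1181.039990336 - 1178.111309797 = 2.928680539s of pulling, and 4.852144235 - 2.928680539 = 1.923463696s, exactly the logged value. A sketch of the subtraction using the numbers from the entry above:

```go
package main

import "fmt"

func main() {
	// Monotonic clock offsets (seconds) copied from the log entry above.
	firstStartedPulling := 1178.111309797
	lastFinishedPulling := 1181.039990336
	podStartE2E := 4.852144235 // watchObservedRunningTime - podCreationTimestamp

	pullWindow := lastFinishedPulling - firstStartedPulling
	slo := podStartE2E - pullWindow
	fmt.Printf("pull window %.9fs, podStartSLOduration %.9fs\n", pullWindow, slo)
	// Prints 2.928680539s and 1.923463696s, matching the logged values
	// (modulo float rounding in the last digit).
}
```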
Jan 22 05:38:16 crc kubenswrapper[4814]: I0122 05:38:16.584898 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 22 05:38:16 crc kubenswrapper[4814]: I0122 05:38:16.585536 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5e974d1b-3f13-47ce-b454-8388999303ae" containerName="glance-log" containerID="cri-o://ecaac6e89002ab3731cd78f85a47bcdd5524ec224312d3a2c40b451e602f04a6" gracePeriod=30
Jan 22 05:38:16 crc kubenswrapper[4814]: I0122 05:38:16.585853 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5e974d1b-3f13-47ce-b454-8388999303ae" containerName="glance-httpd" containerID="cri-o://efeb832c17205f8476fcd09ceb3f9b6ac19a971f8c1491912d2824b1ebf75113" gracePeriod=30
Jan 22 05:38:16 crc kubenswrapper[4814]: I0122 05:38:16.846265 4814 generic.go:334] "Generic (PLEG): container finished" podID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerID="3e7e5af767c984d3fdaf06779eb9462728040348569189fc51cd1706555d75d5" exitCode=0
Jan 22 05:38:16 crc kubenswrapper[4814]: I0122 05:38:16.846296 4814 generic.go:334] "Generic (PLEG): container finished" podID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerID="09656491aa1e669f70e60fc363d9f4b07296cf82c06d2e67ad0af6027edfde39" exitCode=2
Jan 22 05:38:16 crc kubenswrapper[4814]: I0122 05:38:16.846303 4814 generic.go:334] "Generic (PLEG): container finished" podID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerID="782a6d10f00aa687b252061946904aabde8a80565bc89d2277d36b81b40baa4f" exitCode=0
Jan 22 05:38:16 crc kubenswrapper[4814]: I0122 05:38:16.846352 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4320404e-73ff-4bb0-8d9c-7a3d49755fab","Type":"ContainerDied","Data":"3e7e5af767c984d3fdaf06779eb9462728040348569189fc51cd1706555d75d5"}
Jan 22 05:38:16 crc kubenswrapper[4814]: I0122 05:38:16.846377 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4320404e-73ff-4bb0-8d9c-7a3d49755fab","Type":"ContainerDied","Data":"09656491aa1e669f70e60fc363d9f4b07296cf82c06d2e67ad0af6027edfde39"}
Jan 22 05:38:16 crc kubenswrapper[4814]: I0122 05:38:16.846386 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4320404e-73ff-4bb0-8d9c-7a3d49755fab","Type":"ContainerDied","Data":"782a6d10f00aa687b252061946904aabde8a80565bc89d2277d36b81b40baa4f"}
Jan 22 05:38:16 crc kubenswrapper[4814]: I0122 05:38:16.848901 4814 generic.go:334] "Generic (PLEG): container finished" podID="5e974d1b-3f13-47ce-b454-8388999303ae" containerID="ecaac6e89002ab3731cd78f85a47bcdd5524ec224312d3a2c40b451e602f04a6" exitCode=143
Jan 22 05:38:16 crc kubenswrapper[4814]: I0122 05:38:16.848936 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5e974d1b-3f13-47ce-b454-8388999303ae","Type":"ContainerDied","Data":"ecaac6e89002ab3731cd78f85a47bcdd5524ec224312d3a2c40b451e602f04a6"}
Jan 22 05:38:18 crc kubenswrapper[4814]: I0122 05:38:18.873340 4814 generic.go:334] "Generic (PLEG): container finished" podID="3249a9bd-6017-4d3a-80df-4b34a69af9e9" containerID="4c89dd296fc00dba463e21e245a6e159ba4c521d268c8d11b56517760752df73" exitCode=0
Jan 22 05:38:18 crc kubenswrapper[4814]: I0122 05:38:18.873419 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3249a9bd-6017-4d3a-80df-4b34a69af9e9","Type":"ContainerDied","Data":"4c89dd296fc00dba463e21e245a6e159ba4c521d268c8d11b56517760752df73"}
Jan 22 05:38:18 crc kubenswrapper[4814]: I0122 05:38:18.935865 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
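The exit codes in the "container finished" entries decode the shutdown behavior during the 30-second grace period: 143 is 128 + 15, i.e. the process died on SIGTERM (the glance-log tail containers), 0 is a clean shutdown (the httpd-style containers and agents), and 2 is an application-level failure reported by sg-core. A small sketch of that decoding, using the shell convention for signal exits:

```go
package main

import (
	"fmt"
	"syscall"
)

// decode interprets a container exit code the way shells do: values above 128
// are 128 + the number of the terminating signal.
func decode(code int) string {
	switch {
	case code > 128:
		sig := syscall.Signal(code - 128)
		return fmt.Sprintf("killed by signal %d (%v)", code-128, sig)
	case code == 0:
		return "exited cleanly"
	default:
		return fmt.Sprintf("application error %d", code)
	}
}

func main() {
	for _, c := range []int{143, 0, 2} {
		fmt.Printf("exitCode=%d: %s\n", c, decode(c))
	}
	// 143 -> killed by signal 15 (terminated), i.e. SIGTERM inside the
	// grace period; 0 -> clean shutdown; 2 -> app-level failure.
}
```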
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.072889 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") "
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.072936 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gc6dd\" (UniqueName: \"kubernetes.io/projected/3249a9bd-6017-4d3a-80df-4b34a69af9e9-kube-api-access-gc6dd\") pod \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") "
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.073024 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3249a9bd-6017-4d3a-80df-4b34a69af9e9-logs\") pod \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") "
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.073075 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-scripts\") pod \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") "
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.073144 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-public-tls-certs\") pod \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") "
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.073180 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3249a9bd-6017-4d3a-80df-4b34a69af9e9-httpd-run\") pod \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") "
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.073256 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-combined-ca-bundle\") pod \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") "
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.073365 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-config-data\") pod \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\" (UID: \"3249a9bd-6017-4d3a-80df-4b34a69af9e9\") "
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.075043 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3249a9bd-6017-4d3a-80df-4b34a69af9e9-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "3249a9bd-6017-4d3a-80df-4b34a69af9e9" (UID: "3249a9bd-6017-4d3a-80df-4b34a69af9e9"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.075060 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3249a9bd-6017-4d3a-80df-4b34a69af9e9-logs" (OuterVolumeSpecName: "logs") pod "3249a9bd-6017-4d3a-80df-4b34a69af9e9" (UID: "3249a9bd-6017-4d3a-80df-4b34a69af9e9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.082329 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "3249a9bd-6017-4d3a-80df-4b34a69af9e9" (UID: "3249a9bd-6017-4d3a-80df-4b34a69af9e9"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.082956 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-scripts" (OuterVolumeSpecName: "scripts") pod "3249a9bd-6017-4d3a-80df-4b34a69af9e9" (UID: "3249a9bd-6017-4d3a-80df-4b34a69af9e9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.115911 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3249a9bd-6017-4d3a-80df-4b34a69af9e9-kube-api-access-gc6dd" (OuterVolumeSpecName: "kube-api-access-gc6dd") pod "3249a9bd-6017-4d3a-80df-4b34a69af9e9" (UID: "3249a9bd-6017-4d3a-80df-4b34a69af9e9"). InnerVolumeSpecName "kube-api-access-gc6dd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.138806 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3249a9bd-6017-4d3a-80df-4b34a69af9e9" (UID: "3249a9bd-6017-4d3a-80df-4b34a69af9e9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.167470 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-config-data" (OuterVolumeSpecName: "config-data") pod "3249a9bd-6017-4d3a-80df-4b34a69af9e9" (UID: "3249a9bd-6017-4d3a-80df-4b34a69af9e9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.176914 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.177056 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.177141 4814 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" "
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.182895 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gc6dd\" (UniqueName: \"kubernetes.io/projected/3249a9bd-6017-4d3a-80df-4b34a69af9e9-kube-api-access-gc6dd\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.182991 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3249a9bd-6017-4d3a-80df-4b34a69af9e9-logs\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.183081 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.183147 4814 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3249a9bd-6017-4d3a-80df-4b34a69af9e9-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.211887 4814 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc"
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.234605 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3249a9bd-6017-4d3a-80df-4b34a69af9e9" (UID: "3249a9bd-6017-4d3a-80df-4b34a69af9e9"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.284514 4814 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3249a9bd-6017-4d3a-80df-4b34a69af9e9-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.284544 4814 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.885077 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3249a9bd-6017-4d3a-80df-4b34a69af9e9","Type":"ContainerDied","Data":"cfa0e979f6bc95f4765bc0318ca34dc4d2c157d8d7ccf59bd40ef8ca52271fc6"} Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.885331 4814 scope.go:117] "RemoveContainer" containerID="4c89dd296fc00dba463e21e245a6e159ba4c521d268c8d11b56517760752df73" Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.885459 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.917677 4814 scope.go:117] "RemoveContainer" containerID="c94f497335b11dd5bab0f1cf53950ffbd676240caabc9e2e4ed2d7910569f89a" Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.938714 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.947444 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.974734 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 22 05:38:19 crc kubenswrapper[4814]: E0122 05:38:19.975390 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3249a9bd-6017-4d3a-80df-4b34a69af9e9" containerName="glance-httpd" Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.975407 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="3249a9bd-6017-4d3a-80df-4b34a69af9e9" containerName="glance-httpd" Jan 22 05:38:19 crc kubenswrapper[4814]: E0122 05:38:19.975425 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3249a9bd-6017-4d3a-80df-4b34a69af9e9" containerName="glance-log" Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.975433 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="3249a9bd-6017-4d3a-80df-4b34a69af9e9" containerName="glance-log" Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.975745 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="3249a9bd-6017-4d3a-80df-4b34a69af9e9" containerName="glance-log" Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.975796 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="3249a9bd-6017-4d3a-80df-4b34a69af9e9" containerName="glance-httpd" Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.976950 4814 util.go:30] "No sandbox for pod can be found. 
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.989154 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.989348 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Jan 22 05:38:19 crc kubenswrapper[4814]: I0122 05:38:19.994764 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.096524 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8790567d-39f9-432d-9e92-078aa6944ffc-scripts\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.096571 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8790567d-39f9-432d-9e92-078aa6944ffc-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.096638 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8790567d-39f9-432d-9e92-078aa6944ffc-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.096872 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8790567d-39f9-432d-9e92-078aa6944ffc-config-data\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.096970 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.097099 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8790567d-39f9-432d-9e92-078aa6944ffc-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.097161 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxzzb\" (UniqueName: \"kubernetes.io/projected/8790567d-39f9-432d-9e92-078aa6944ffc-kube-api-access-wxzzb\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.097184 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8790567d-39f9-432d-9e92-078aa6944ffc-logs\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.199126 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8790567d-39f9-432d-9e92-078aa6944ffc-scripts\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.199183 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8790567d-39f9-432d-9e92-078aa6944ffc-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.199240 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8790567d-39f9-432d-9e92-078aa6944ffc-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.199269 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8790567d-39f9-432d-9e92-078aa6944ffc-config-data\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.199289 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.199324 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8790567d-39f9-432d-9e92-078aa6944ffc-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.199376 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxzzb\" (UniqueName: \"kubernetes.io/projected/8790567d-39f9-432d-9e92-078aa6944ffc-kube-api-access-wxzzb\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.199399 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8790567d-39f9-432d-9e92-078aa6944ffc-logs\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.200082 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8790567d-39f9-432d-9e92-078aa6944ffc-logs\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.200300 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8790567d-39f9-432d-9e92-078aa6944ffc-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.200652 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.210292 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8790567d-39f9-432d-9e92-078aa6944ffc-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.212919 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8790567d-39f9-432d-9e92-078aa6944ffc-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.213833 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8790567d-39f9-432d-9e92-078aa6944ffc-config-data\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.227851 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8790567d-39f9-432d-9e92-078aa6944ffc-scripts\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.229268 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxzzb\" (UniqueName: \"kubernetes.io/projected/8790567d-39f9-432d-9e92-078aa6944ffc-kube-api-access-wxzzb\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.256472 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-external-api-0\" (UID: \"8790567d-39f9-432d-9e92-078aa6944ffc\") " pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.314733 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.391206 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3249a9bd-6017-4d3a-80df-4b34a69af9e9" path="/var/lib/kubelet/pods/3249a9bd-6017-4d3a-80df-4b34a69af9e9/volumes"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.903394 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-z4pzm" event={"ID":"f1fd8e88-42ab-43bb-8697-c7aebb8fec34","Type":"ContainerStarted","Data":"b54f70c21d7b84c566a44ab516c7816eaf57e58dce24c3e7b2dcee555e9ace84"}
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.929341 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-z4pzm" podStartSLOduration=3.07430476 podStartE2EDuration="30.929323875s" podCreationTimestamp="2026-01-22 05:37:50 +0000 UTC" firstStartedPulling="2026-01-22 05:37:51.887807967 +0000 UTC m=+1157.971296182" lastFinishedPulling="2026-01-22 05:38:19.742827062 +0000 UTC m=+1185.826315297" observedRunningTime="2026-01-22 05:38:20.925047931 +0000 UTC m=+1187.008536146" watchObservedRunningTime="2026-01-22 05:38:20.929323875 +0000 UTC m=+1187.012812090"
Jan 22 05:38:20 crc kubenswrapper[4814]: I0122 05:38:20.963499 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
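The nova-cell0-conductor-db-sync-z4pzm entry is a second worked instance of the same SLO arithmetic: the pull window from the monotonic offsets is 1185.826315297 - 1157.971296182 = 27.855019115s, and 30.929323875 - 27.855019115 = 3.074304760s, which matches the logged podStartSLOduration=3.07430476. In other words, nearly 28 of the job's 31 seconds of startup were spent pulling the image, so the SLO-relevant startup time was only about 3 seconds.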
\"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-combined-ca-bundle\") pod \"5e974d1b-3f13-47ce-b454-8388999303ae\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.730694 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-config-data\") pod \"5e974d1b-3f13-47ce-b454-8388999303ae\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.730727 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e974d1b-3f13-47ce-b454-8388999303ae-httpd-run\") pod \"5e974d1b-3f13-47ce-b454-8388999303ae\" (UID: \"5e974d1b-3f13-47ce-b454-8388999303ae\") " Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.731783 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e974d1b-3f13-47ce-b454-8388999303ae-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5e974d1b-3f13-47ce-b454-8388999303ae" (UID: "5e974d1b-3f13-47ce-b454-8388999303ae"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.732748 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e974d1b-3f13-47ce-b454-8388999303ae-logs" (OuterVolumeSpecName: "logs") pod "5e974d1b-3f13-47ce-b454-8388999303ae" (UID: "5e974d1b-3f13-47ce-b454-8388999303ae"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.749458 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e974d1b-3f13-47ce-b454-8388999303ae-kube-api-access-8jc9v" (OuterVolumeSpecName: "kube-api-access-8jc9v") pod "5e974d1b-3f13-47ce-b454-8388999303ae" (UID: "5e974d1b-3f13-47ce-b454-8388999303ae"). InnerVolumeSpecName "kube-api-access-8jc9v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.780350 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5e974d1b-3f13-47ce-b454-8388999303ae" (UID: "5e974d1b-3f13-47ce-b454-8388999303ae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.785819 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-scripts" (OuterVolumeSpecName: "scripts") pod "5e974d1b-3f13-47ce-b454-8388999303ae" (UID: "5e974d1b-3f13-47ce-b454-8388999303ae"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.790853 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "5e974d1b-3f13-47ce-b454-8388999303ae" (UID: "5e974d1b-3f13-47ce-b454-8388999303ae"). InnerVolumeSpecName "local-storage06-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.812953 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-config-data" (OuterVolumeSpecName: "config-data") pod "5e974d1b-3f13-47ce-b454-8388999303ae" (UID: "5e974d1b-3f13-47ce-b454-8388999303ae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.832564 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.832595 4814 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5e974d1b-3f13-47ce-b454-8388999303ae-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.832652 4814 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.832665 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8jc9v\" (UniqueName: \"kubernetes.io/projected/5e974d1b-3f13-47ce-b454-8388999303ae-kube-api-access-8jc9v\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.832674 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.832682 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5e974d1b-3f13-47ce-b454-8388999303ae-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.835923 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.843757 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "5e974d1b-3f13-47ce-b454-8388999303ae" (UID: "5e974d1b-3f13-47ce-b454-8388999303ae"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.857809 4814 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.915005 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8790567d-39f9-432d-9e92-078aa6944ffc","Type":"ContainerStarted","Data":"599ef4d5aff38037ca31657eaa9d0f911a508b252717b6ebcef43f3202c871ad"} Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.916367 4814 generic.go:334] "Generic (PLEG): container finished" podID="5e974d1b-3f13-47ce-b454-8388999303ae" containerID="efeb832c17205f8476fcd09ceb3f9b6ac19a971f8c1491912d2824b1ebf75113" exitCode=0 Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.916391 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5e974d1b-3f13-47ce-b454-8388999303ae","Type":"ContainerDied","Data":"efeb832c17205f8476fcd09ceb3f9b6ac19a971f8c1491912d2824b1ebf75113"} Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.916406 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5e974d1b-3f13-47ce-b454-8388999303ae","Type":"ContainerDied","Data":"dbdfa45b43487514450f7cc8bca52e0072a4f059e07f9b5e96883f0fde240d5b"} Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.916423 4814 scope.go:117] "RemoveContainer" containerID="efeb832c17205f8476fcd09ceb3f9b6ac19a971f8c1491912d2824b1ebf75113" Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.916433 4814 util.go:48] "No ready sandbox for pod can be found. 
Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.938462 4814 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.938485 4814 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e974d1b-3f13-47ce-b454-8388999303ae-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.947903 4814 scope.go:117] "RemoveContainer" containerID="ecaac6e89002ab3731cd78f85a47bcdd5524ec224312d3a2c40b451e602f04a6"
Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.969131 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.976522 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.993399 4814 scope.go:117] "RemoveContainer" containerID="efeb832c17205f8476fcd09ceb3f9b6ac19a971f8c1491912d2824b1ebf75113"
Jan 22 05:38:21 crc kubenswrapper[4814]: E0122 05:38:21.993833 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"efeb832c17205f8476fcd09ceb3f9b6ac19a971f8c1491912d2824b1ebf75113\": container with ID starting with efeb832c17205f8476fcd09ceb3f9b6ac19a971f8c1491912d2824b1ebf75113 not found: ID does not exist" containerID="efeb832c17205f8476fcd09ceb3f9b6ac19a971f8c1491912d2824b1ebf75113"
Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.993931 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efeb832c17205f8476fcd09ceb3f9b6ac19a971f8c1491912d2824b1ebf75113"} err="failed to get container status \"efeb832c17205f8476fcd09ceb3f9b6ac19a971f8c1491912d2824b1ebf75113\": rpc error: code = NotFound desc = could not find container \"efeb832c17205f8476fcd09ceb3f9b6ac19a971f8c1491912d2824b1ebf75113\": container with ID starting with efeb832c17205f8476fcd09ceb3f9b6ac19a971f8c1491912d2824b1ebf75113 not found: ID does not exist"
Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.994007 4814 scope.go:117] "RemoveContainer" containerID="ecaac6e89002ab3731cd78f85a47bcdd5524ec224312d3a2c40b451e602f04a6"
Jan 22 05:38:21 crc kubenswrapper[4814]: E0122 05:38:21.994304 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecaac6e89002ab3731cd78f85a47bcdd5524ec224312d3a2c40b451e602f04a6\": container with ID starting with ecaac6e89002ab3731cd78f85a47bcdd5524ec224312d3a2c40b451e602f04a6 not found: ID does not exist" containerID="ecaac6e89002ab3731cd78f85a47bcdd5524ec224312d3a2c40b451e602f04a6"
Jan 22 05:38:21 crc kubenswrapper[4814]: I0122 05:38:21.994346 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecaac6e89002ab3731cd78f85a47bcdd5524ec224312d3a2c40b451e602f04a6"} err="failed to get container status \"ecaac6e89002ab3731cd78f85a47bcdd5524ec224312d3a2c40b451e602f04a6\": rpc error: code = NotFound desc = could not find container \"ecaac6e89002ab3731cd78f85a47bcdd5524ec224312d3a2c40b451e602f04a6\": container with ID starting with ecaac6e89002ab3731cd78f85a47bcdd5524ec224312d3a2c40b451e602f04a6 not found: ID does not exist"
does not exist" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.003015 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 05:38:22 crc kubenswrapper[4814]: E0122 05:38:22.003427 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e974d1b-3f13-47ce-b454-8388999303ae" containerName="glance-httpd" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.003442 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e974d1b-3f13-47ce-b454-8388999303ae" containerName="glance-httpd" Jan 22 05:38:22 crc kubenswrapper[4814]: E0122 05:38:22.003454 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e974d1b-3f13-47ce-b454-8388999303ae" containerName="glance-log" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.003463 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e974d1b-3f13-47ce-b454-8388999303ae" containerName="glance-log" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.003713 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e974d1b-3f13-47ce-b454-8388999303ae" containerName="glance-log" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.003733 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e974d1b-3f13-47ce-b454-8388999303ae" containerName="glance-httpd" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.004646 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.006863 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.007731 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.023017 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.143175 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/050e26aa-06f3-4677-9d66-e0794a889165-logs\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.143213 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/050e26aa-06f3-4677-9d66-e0794a889165-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.143313 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/050e26aa-06f3-4677-9d66-e0794a889165-config-data\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.143336 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/050e26aa-06f3-4677-9d66-e0794a889165-internal-tls-certs\") pod 
\"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.143353 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdpqj\" (UniqueName: \"kubernetes.io/projected/050e26aa-06f3-4677-9d66-e0794a889165-kube-api-access-cdpqj\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.143366 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/050e26aa-06f3-4677-9d66-e0794a889165-scripts\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.143413 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/050e26aa-06f3-4677-9d66-e0794a889165-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.143428 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.245886 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/050e26aa-06f3-4677-9d66-e0794a889165-config-data\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.245931 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/050e26aa-06f3-4677-9d66-e0794a889165-scripts\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.245951 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/050e26aa-06f3-4677-9d66-e0794a889165-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.245968 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdpqj\" (UniqueName: \"kubernetes.io/projected/050e26aa-06f3-4677-9d66-e0794a889165-kube-api-access-cdpqj\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.246016 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/050e26aa-06f3-4677-9d66-e0794a889165-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.246034 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.246063 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/050e26aa-06f3-4677-9d66-e0794a889165-logs\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.246081 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/050e26aa-06f3-4677-9d66-e0794a889165-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.246460 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/050e26aa-06f3-4677-9d66-e0794a889165-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.248601 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.250028 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/050e26aa-06f3-4677-9d66-e0794a889165-logs\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.252982 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/050e26aa-06f3-4677-9d66-e0794a889165-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.253846 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/050e26aa-06f3-4677-9d66-e0794a889165-config-data\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.259118 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/050e26aa-06f3-4677-9d66-e0794a889165-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: 
\"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.272312 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdpqj\" (UniqueName: \"kubernetes.io/projected/050e26aa-06f3-4677-9d66-e0794a889165-kube-api-access-cdpqj\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.278545 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/050e26aa-06f3-4677-9d66-e0794a889165-scripts\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.317111 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"050e26aa-06f3-4677-9d66-e0794a889165\") " pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.321359 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.360944 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e974d1b-3f13-47ce-b454-8388999303ae" path="/var/lib/kubelet/pods/5e974d1b-3f13-47ce-b454-8388999303ae/volumes" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.662717 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.755547 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4320404e-73ff-4bb0-8d9c-7a3d49755fab-log-httpd\") pod \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.756046 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4320404e-73ff-4bb0-8d9c-7a3d49755fab-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4320404e-73ff-4bb0-8d9c-7a3d49755fab" (UID: "4320404e-73ff-4bb0-8d9c-7a3d49755fab"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.756114 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6wqr\" (UniqueName: \"kubernetes.io/projected/4320404e-73ff-4bb0-8d9c-7a3d49755fab-kube-api-access-b6wqr\") pod \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.756211 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-scripts\") pod \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.756237 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-config-data\") pod \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.756257 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4320404e-73ff-4bb0-8d9c-7a3d49755fab-run-httpd\") pod \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.756289 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-combined-ca-bundle\") pod \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.756426 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-sg-core-conf-yaml\") pod \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\" (UID: \"4320404e-73ff-4bb0-8d9c-7a3d49755fab\") " Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.756792 4814 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4320404e-73ff-4bb0-8d9c-7a3d49755fab-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.756931 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4320404e-73ff-4bb0-8d9c-7a3d49755fab-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4320404e-73ff-4bb0-8d9c-7a3d49755fab" (UID: "4320404e-73ff-4bb0-8d9c-7a3d49755fab"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.761062 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-scripts" (OuterVolumeSpecName: "scripts") pod "4320404e-73ff-4bb0-8d9c-7a3d49755fab" (UID: "4320404e-73ff-4bb0-8d9c-7a3d49755fab"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.761273 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4320404e-73ff-4bb0-8d9c-7a3d49755fab-kube-api-access-b6wqr" (OuterVolumeSpecName: "kube-api-access-b6wqr") pod "4320404e-73ff-4bb0-8d9c-7a3d49755fab" (UID: "4320404e-73ff-4bb0-8d9c-7a3d49755fab"). InnerVolumeSpecName "kube-api-access-b6wqr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.784784 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4320404e-73ff-4bb0-8d9c-7a3d49755fab" (UID: "4320404e-73ff-4bb0-8d9c-7a3d49755fab"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.850316 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4320404e-73ff-4bb0-8d9c-7a3d49755fab" (UID: "4320404e-73ff-4bb0-8d9c-7a3d49755fab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.860605 4814 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.860656 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6wqr\" (UniqueName: \"kubernetes.io/projected/4320404e-73ff-4bb0-8d9c-7a3d49755fab-kube-api-access-b6wqr\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.860666 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.860674 4814 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4320404e-73ff-4bb0-8d9c-7a3d49755fab-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.860682 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.876822 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-config-data" (OuterVolumeSpecName: "config-data") pod "4320404e-73ff-4bb0-8d9c-7a3d49755fab" (UID: "4320404e-73ff-4bb0-8d9c-7a3d49755fab"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.933715 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.936945 4814 generic.go:334] "Generic (PLEG): container finished" podID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerID="018cbf4d21c0daec709601d8abb8addb536339766bc209b413b89f750e33dc2f" exitCode=0 Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.936992 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.937010 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4320404e-73ff-4bb0-8d9c-7a3d49755fab","Type":"ContainerDied","Data":"018cbf4d21c0daec709601d8abb8addb536339766bc209b413b89f750e33dc2f"} Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.937032 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4320404e-73ff-4bb0-8d9c-7a3d49755fab","Type":"ContainerDied","Data":"749837654b7819fe05d28d98994df5b6cced70c4b64e1fb5b22763f8e5153e28"} Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.937048 4814 scope.go:117] "RemoveContainer" containerID="3e7e5af767c984d3fdaf06779eb9462728040348569189fc51cd1706555d75d5" Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.944113 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8790567d-39f9-432d-9e92-078aa6944ffc","Type":"ContainerStarted","Data":"b8a6770f46e48983f0cdb21590ed81cc235b076d529b55915ee89c1f3c0ada26"} Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.944233 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8790567d-39f9-432d-9e92-078aa6944ffc","Type":"ContainerStarted","Data":"59b99ac6a9a520e71b76a3cd872455c2ef747efc725c03331bfd435b73b06c6a"} Jan 22 05:38:22 crc kubenswrapper[4814]: I0122 05:38:22.983026 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4320404e-73ff-4bb0-8d9c-7a3d49755fab-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.005717 4814 scope.go:117] "RemoveContainer" containerID="09656491aa1e669f70e60fc363d9f4b07296cf82c06d2e67ad0af6027edfde39" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.032014 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.031992527 podStartE2EDuration="4.031992527s" podCreationTimestamp="2026-01-22 05:38:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:38:22.983434943 +0000 UTC m=+1189.066923158" watchObservedRunningTime="2026-01-22 05:38:23.031992527 +0000 UTC m=+1189.115480742" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.053474 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.058053 4814 scope.go:117] "RemoveContainer" containerID="782a6d10f00aa687b252061946904aabde8a80565bc89d2277d36b81b40baa4f" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.073826 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:23 crc 
kubenswrapper[4814]: I0122 05:38:23.090466 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:23 crc kubenswrapper[4814]: E0122 05:38:23.090950 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="proxy-httpd" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.090971 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="proxy-httpd" Jan 22 05:38:23 crc kubenswrapper[4814]: E0122 05:38:23.090996 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="ceilometer-central-agent" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.091004 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="ceilometer-central-agent" Jan 22 05:38:23 crc kubenswrapper[4814]: E0122 05:38:23.091026 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="sg-core" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.091032 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="sg-core" Jan 22 05:38:23 crc kubenswrapper[4814]: E0122 05:38:23.091062 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="ceilometer-notification-agent" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.091069 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="ceilometer-notification-agent" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.091255 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="sg-core" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.091269 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="ceilometer-central-agent" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.091280 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="ceilometer-notification-agent" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.091293 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" containerName="proxy-httpd" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.094585 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.096935 4814 scope.go:117] "RemoveContainer" containerID="018cbf4d21c0daec709601d8abb8addb536339766bc209b413b89f750e33dc2f" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.102956 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.105030 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.106233 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.137884 4814 scope.go:117] "RemoveContainer" containerID="3e7e5af767c984d3fdaf06779eb9462728040348569189fc51cd1706555d75d5" Jan 22 05:38:23 crc kubenswrapper[4814]: E0122 05:38:23.138524 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e7e5af767c984d3fdaf06779eb9462728040348569189fc51cd1706555d75d5\": container with ID starting with 3e7e5af767c984d3fdaf06779eb9462728040348569189fc51cd1706555d75d5 not found: ID does not exist" containerID="3e7e5af767c984d3fdaf06779eb9462728040348569189fc51cd1706555d75d5" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.138587 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e7e5af767c984d3fdaf06779eb9462728040348569189fc51cd1706555d75d5"} err="failed to get container status \"3e7e5af767c984d3fdaf06779eb9462728040348569189fc51cd1706555d75d5\": rpc error: code = NotFound desc = could not find container \"3e7e5af767c984d3fdaf06779eb9462728040348569189fc51cd1706555d75d5\": container with ID starting with 3e7e5af767c984d3fdaf06779eb9462728040348569189fc51cd1706555d75d5 not found: ID does not exist" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.138643 4814 scope.go:117] "RemoveContainer" containerID="09656491aa1e669f70e60fc363d9f4b07296cf82c06d2e67ad0af6027edfde39" Jan 22 05:38:23 crc kubenswrapper[4814]: E0122 05:38:23.139214 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09656491aa1e669f70e60fc363d9f4b07296cf82c06d2e67ad0af6027edfde39\": container with ID starting with 09656491aa1e669f70e60fc363d9f4b07296cf82c06d2e67ad0af6027edfde39 not found: ID does not exist" containerID="09656491aa1e669f70e60fc363d9f4b07296cf82c06d2e67ad0af6027edfde39" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.139266 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09656491aa1e669f70e60fc363d9f4b07296cf82c06d2e67ad0af6027edfde39"} err="failed to get container status \"09656491aa1e669f70e60fc363d9f4b07296cf82c06d2e67ad0af6027edfde39\": rpc error: code = NotFound desc = could not find container \"09656491aa1e669f70e60fc363d9f4b07296cf82c06d2e67ad0af6027edfde39\": container with ID starting with 09656491aa1e669f70e60fc363d9f4b07296cf82c06d2e67ad0af6027edfde39 not found: ID does not exist" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.139294 4814 scope.go:117] "RemoveContainer" containerID="782a6d10f00aa687b252061946904aabde8a80565bc89d2277d36b81b40baa4f" Jan 22 05:38:23 crc kubenswrapper[4814]: E0122 05:38:23.139711 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"782a6d10f00aa687b252061946904aabde8a80565bc89d2277d36b81b40baa4f\": container with ID starting with 782a6d10f00aa687b252061946904aabde8a80565bc89d2277d36b81b40baa4f not found: ID does not exist" containerID="782a6d10f00aa687b252061946904aabde8a80565bc89d2277d36b81b40baa4f" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.139743 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"782a6d10f00aa687b252061946904aabde8a80565bc89d2277d36b81b40baa4f"} err="failed to get container status \"782a6d10f00aa687b252061946904aabde8a80565bc89d2277d36b81b40baa4f\": rpc error: code = NotFound desc = could not find container \"782a6d10f00aa687b252061946904aabde8a80565bc89d2277d36b81b40baa4f\": container with ID starting with 782a6d10f00aa687b252061946904aabde8a80565bc89d2277d36b81b40baa4f not found: ID does not exist" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.139759 4814 scope.go:117] "RemoveContainer" containerID="018cbf4d21c0daec709601d8abb8addb536339766bc209b413b89f750e33dc2f" Jan 22 05:38:23 crc kubenswrapper[4814]: E0122 05:38:23.140006 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"018cbf4d21c0daec709601d8abb8addb536339766bc209b413b89f750e33dc2f\": container with ID starting with 018cbf4d21c0daec709601d8abb8addb536339766bc209b413b89f750e33dc2f not found: ID does not exist" containerID="018cbf4d21c0daec709601d8abb8addb536339766bc209b413b89f750e33dc2f" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.140036 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"018cbf4d21c0daec709601d8abb8addb536339766bc209b413b89f750e33dc2f"} err="failed to get container status \"018cbf4d21c0daec709601d8abb8addb536339766bc209b413b89f750e33dc2f\": rpc error: code = NotFound desc = could not find container \"018cbf4d21c0daec709601d8abb8addb536339766bc209b413b89f750e33dc2f\": container with ID starting with 018cbf4d21c0daec709601d8abb8addb536339766bc209b413b89f750e33dc2f not found: ID does not exist" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.188854 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-config-data\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.188927 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.189215 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rgd6\" (UniqueName: \"kubernetes.io/projected/0eedaf08-b691-48c7-a80e-428b4b22bf1d-kube-api-access-6rgd6\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.189260 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0eedaf08-b691-48c7-a80e-428b4b22bf1d-log-httpd\") pod \"ceilometer-0\" (UID: 
\"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.189288 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.189375 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-scripts\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.189408 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0eedaf08-b691-48c7-a80e-428b4b22bf1d-run-httpd\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.291420 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rgd6\" (UniqueName: \"kubernetes.io/projected/0eedaf08-b691-48c7-a80e-428b4b22bf1d-kube-api-access-6rgd6\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.291479 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0eedaf08-b691-48c7-a80e-428b4b22bf1d-log-httpd\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.291533 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.291556 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-scripts\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.291579 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0eedaf08-b691-48c7-a80e-428b4b22bf1d-run-httpd\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.291650 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-config-data\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.291701 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-sg-core-conf-yaml\") 
pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.291950 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0eedaf08-b691-48c7-a80e-428b4b22bf1d-log-httpd\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.292187 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0eedaf08-b691-48c7-a80e-428b4b22bf1d-run-httpd\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.298072 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.298098 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.299855 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-scripts\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.305126 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-config-data\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.308645 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rgd6\" (UniqueName: \"kubernetes.io/projected/0eedaf08-b691-48c7-a80e-428b4b22bf1d-kube-api-access-6rgd6\") pod \"ceilometer-0\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.416912 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.750678 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.766330 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.984312 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0eedaf08-b691-48c7-a80e-428b4b22bf1d","Type":"ContainerStarted","Data":"ddae2a572d6a31daae029a862815a47e278a4388145824827fe9c66f000848a2"} Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.989530 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"050e26aa-06f3-4677-9d66-e0794a889165","Type":"ContainerStarted","Data":"dc77a552aaf74147fd45dcd4fbf023f3fa9351365d1f2a2a0e55eeb88e9ae8e7"} Jan 22 05:38:23 crc kubenswrapper[4814]: I0122 05:38:23.989602 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"050e26aa-06f3-4677-9d66-e0794a889165","Type":"ContainerStarted","Data":"1d455cc042060de2ee894efbd48ad96a55499f15ed80b9abdf3e60b5b845cb88"} Jan 22 05:38:24 crc kubenswrapper[4814]: I0122 05:38:24.358266 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4320404e-73ff-4bb0-8d9c-7a3d49755fab" path="/var/lib/kubelet/pods/4320404e-73ff-4bb0-8d9c-7a3d49755fab/volumes" Jan 22 05:38:25 crc kubenswrapper[4814]: I0122 05:38:25.001896 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0eedaf08-b691-48c7-a80e-428b4b22bf1d","Type":"ContainerStarted","Data":"eb063246da416c0a364dcdf4c34ffaa062ae9e4caa43bb2ae89c2fcb2117b440"} Jan 22 05:38:25 crc kubenswrapper[4814]: I0122 05:38:25.004523 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"050e26aa-06f3-4677-9d66-e0794a889165","Type":"ContainerStarted","Data":"7f3607e6e343b5c3dedbfcf4798a790d212101867a60af0f32eb2cf1eb3521f7"} Jan 22 05:38:25 crc kubenswrapper[4814]: I0122 05:38:25.027638 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.027603969 podStartE2EDuration="4.027603969s" podCreationTimestamp="2026-01-22 05:38:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:38:25.021302511 +0000 UTC m=+1191.104790726" watchObservedRunningTime="2026-01-22 05:38:25.027603969 +0000 UTC m=+1191.111092194" Jan 22 05:38:26 crc kubenswrapper[4814]: I0122 05:38:26.016684 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0eedaf08-b691-48c7-a80e-428b4b22bf1d","Type":"ContainerStarted","Data":"b569b2b04ec3c8a76550b75a4fa83148edf9310ee809b327354b809b671c77be"} Jan 22 05:38:26 crc kubenswrapper[4814]: I0122 05:38:26.017302 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0eedaf08-b691-48c7-a80e-428b4b22bf1d","Type":"ContainerStarted","Data":"2140c65f026d9c712f279365c452005acd83d69727701122e7fae27ece2fafa4"} Jan 22 05:38:27 crc kubenswrapper[4814]: I0122 05:38:27.029911 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"0eedaf08-b691-48c7-a80e-428b4b22bf1d","Type":"ContainerStarted","Data":"780251ab63d35967e16e715f8b4ca01b8934bb84fbbbf88c92934165e6eb112b"} Jan 22 05:38:27 crc kubenswrapper[4814]: I0122 05:38:27.031247 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 05:38:27 crc kubenswrapper[4814]: I0122 05:38:27.050790 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.089907915 podStartE2EDuration="4.050767405s" podCreationTimestamp="2026-01-22 05:38:23 +0000 UTC" firstStartedPulling="2026-01-22 05:38:23.766116461 +0000 UTC m=+1189.849604676" lastFinishedPulling="2026-01-22 05:38:26.726975951 +0000 UTC m=+1192.810464166" observedRunningTime="2026-01-22 05:38:27.049431072 +0000 UTC m=+1193.132919277" watchObservedRunningTime="2026-01-22 05:38:27.050767405 +0000 UTC m=+1193.134255620" Jan 22 05:38:30 crc kubenswrapper[4814]: I0122 05:38:30.316691 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 05:38:30 crc kubenswrapper[4814]: I0122 05:38:30.317237 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 22 05:38:30 crc kubenswrapper[4814]: I0122 05:38:30.352449 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 05:38:30 crc kubenswrapper[4814]: I0122 05:38:30.388134 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 22 05:38:31 crc kubenswrapper[4814]: I0122 05:38:31.064115 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 05:38:31 crc kubenswrapper[4814]: I0122 05:38:31.064157 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 22 05:38:32 crc kubenswrapper[4814]: I0122 05:38:32.072287 4814 generic.go:334] "Generic (PLEG): container finished" podID="f1fd8e88-42ab-43bb-8697-c7aebb8fec34" containerID="b54f70c21d7b84c566a44ab516c7816eaf57e58dce24c3e7b2dcee555e9ace84" exitCode=0 Jan 22 05:38:32 crc kubenswrapper[4814]: I0122 05:38:32.072369 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-z4pzm" event={"ID":"f1fd8e88-42ab-43bb-8697-c7aebb8fec34","Type":"ContainerDied","Data":"b54f70c21d7b84c566a44ab516c7816eaf57e58dce24c3e7b2dcee555e9ace84"} Jan 22 05:38:32 crc kubenswrapper[4814]: I0122 05:38:32.322597 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 22 05:38:32 crc kubenswrapper[4814]: I0122 05:38:32.322744 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 22 05:38:32 crc kubenswrapper[4814]: I0122 05:38:32.361249 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 05:38:32 crc kubenswrapper[4814]: I0122 05:38:32.377100 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:32 crc kubenswrapper[4814]: I0122 05:38:32.377318 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="ceilometer-central-agent" 
containerID="cri-o://eb063246da416c0a364dcdf4c34ffaa062ae9e4caa43bb2ae89c2fcb2117b440" gracePeriod=30 Jan 22 05:38:32 crc kubenswrapper[4814]: I0122 05:38:32.377373 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="proxy-httpd" containerID="cri-o://780251ab63d35967e16e715f8b4ca01b8934bb84fbbbf88c92934165e6eb112b" gracePeriod=30 Jan 22 05:38:32 crc kubenswrapper[4814]: I0122 05:38:32.377388 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="sg-core" containerID="cri-o://b569b2b04ec3c8a76550b75a4fa83148edf9310ee809b327354b809b671c77be" gracePeriod=30 Jan 22 05:38:32 crc kubenswrapper[4814]: I0122 05:38:32.377444 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="ceilometer-notification-agent" containerID="cri-o://2140c65f026d9c712f279365c452005acd83d69727701122e7fae27ece2fafa4" gracePeriod=30 Jan 22 05:38:32 crc kubenswrapper[4814]: I0122 05:38:32.380980 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.125437 4814 generic.go:334] "Generic (PLEG): container finished" podID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerID="780251ab63d35967e16e715f8b4ca01b8934bb84fbbbf88c92934165e6eb112b" exitCode=0 Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.127137 4814 generic.go:334] "Generic (PLEG): container finished" podID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerID="b569b2b04ec3c8a76550b75a4fa83148edf9310ee809b327354b809b671c77be" exitCode=2 Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.127149 4814 generic.go:334] "Generic (PLEG): container finished" podID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerID="2140c65f026d9c712f279365c452005acd83d69727701122e7fae27ece2fafa4" exitCode=0 Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.127156 4814 generic.go:334] "Generic (PLEG): container finished" podID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerID="eb063246da416c0a364dcdf4c34ffaa062ae9e4caa43bb2ae89c2fcb2117b440" exitCode=0 Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.131351 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0eedaf08-b691-48c7-a80e-428b4b22bf1d","Type":"ContainerDied","Data":"780251ab63d35967e16e715f8b4ca01b8934bb84fbbbf88c92934165e6eb112b"} Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.131385 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0eedaf08-b691-48c7-a80e-428b4b22bf1d","Type":"ContainerDied","Data":"b569b2b04ec3c8a76550b75a4fa83148edf9310ee809b327354b809b671c77be"} Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.131401 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.131412 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0eedaf08-b691-48c7-a80e-428b4b22bf1d","Type":"ContainerDied","Data":"2140c65f026d9c712f279365c452005acd83d69727701122e7fae27ece2fafa4"} Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.132060 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"0eedaf08-b691-48c7-a80e-428b4b22bf1d","Type":"ContainerDied","Data":"eb063246da416c0a364dcdf4c34ffaa062ae9e4caa43bb2ae89c2fcb2117b440"} Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.132080 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.281859 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.379067 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0eedaf08-b691-48c7-a80e-428b4b22bf1d-run-httpd\") pod \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.379147 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-scripts\") pod \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.379204 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-combined-ca-bundle\") pod \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.379253 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-sg-core-conf-yaml\") pod \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.379288 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rgd6\" (UniqueName: \"kubernetes.io/projected/0eedaf08-b691-48c7-a80e-428b4b22bf1d-kube-api-access-6rgd6\") pod \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.379321 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-config-data\") pod \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.379369 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0eedaf08-b691-48c7-a80e-428b4b22bf1d-log-httpd\") pod \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\" (UID: \"0eedaf08-b691-48c7-a80e-428b4b22bf1d\") " Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.379570 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0eedaf08-b691-48c7-a80e-428b4b22bf1d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0eedaf08-b691-48c7-a80e-428b4b22bf1d" (UID: "0eedaf08-b691-48c7-a80e-428b4b22bf1d"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.379854 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0eedaf08-b691-48c7-a80e-428b4b22bf1d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0eedaf08-b691-48c7-a80e-428b4b22bf1d" (UID: "0eedaf08-b691-48c7-a80e-428b4b22bf1d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.383600 4814 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0eedaf08-b691-48c7-a80e-428b4b22bf1d-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.383721 4814 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0eedaf08-b691-48c7-a80e-428b4b22bf1d-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.387817 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-scripts" (OuterVolumeSpecName: "scripts") pod "0eedaf08-b691-48c7-a80e-428b4b22bf1d" (UID: "0eedaf08-b691-48c7-a80e-428b4b22bf1d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.399761 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0eedaf08-b691-48c7-a80e-428b4b22bf1d-kube-api-access-6rgd6" (OuterVolumeSpecName: "kube-api-access-6rgd6") pod "0eedaf08-b691-48c7-a80e-428b4b22bf1d" (UID: "0eedaf08-b691-48c7-a80e-428b4b22bf1d"). InnerVolumeSpecName "kube-api-access-6rgd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.415699 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0eedaf08-b691-48c7-a80e-428b4b22bf1d" (UID: "0eedaf08-b691-48c7-a80e-428b4b22bf1d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.441819 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-z4pzm" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.489871 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.490136 4814 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.490208 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rgd6\" (UniqueName: \"kubernetes.io/projected/0eedaf08-b691-48c7-a80e-428b4b22bf1d-kube-api-access-6rgd6\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.580771 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0eedaf08-b691-48c7-a80e-428b4b22bf1d" (UID: "0eedaf08-b691-48c7-a80e-428b4b22bf1d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.596263 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-config-data\") pod \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.596323 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-scripts\") pod \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.596430 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ncvr\" (UniqueName: \"kubernetes.io/projected/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-kube-api-access-5ncvr\") pod \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.596447 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-combined-ca-bundle\") pod \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\" (UID: \"f1fd8e88-42ab-43bb-8697-c7aebb8fec34\") " Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.596855 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.617987 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-scripts" (OuterVolumeSpecName: "scripts") pod "f1fd8e88-42ab-43bb-8697-c7aebb8fec34" (UID: "f1fd8e88-42ab-43bb-8697-c7aebb8fec34"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.620918 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-kube-api-access-5ncvr" (OuterVolumeSpecName: "kube-api-access-5ncvr") pod "f1fd8e88-42ab-43bb-8697-c7aebb8fec34" (UID: "f1fd8e88-42ab-43bb-8697-c7aebb8fec34"). InnerVolumeSpecName "kube-api-access-5ncvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.665098 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-config-data" (OuterVolumeSpecName: "config-data") pod "0eedaf08-b691-48c7-a80e-428b4b22bf1d" (UID: "0eedaf08-b691-48c7-a80e-428b4b22bf1d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.670753 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-config-data" (OuterVolumeSpecName: "config-data") pod "f1fd8e88-42ab-43bb-8697-c7aebb8fec34" (UID: "f1fd8e88-42ab-43bb-8697-c7aebb8fec34"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.696890 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f1fd8e88-42ab-43bb-8697-c7aebb8fec34" (UID: "f1fd8e88-42ab-43bb-8697-c7aebb8fec34"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.697973 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0eedaf08-b691-48c7-a80e-428b4b22bf1d-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.697997 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.698006 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.698015 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ncvr\" (UniqueName: \"kubernetes.io/projected/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-kube-api-access-5ncvr\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:33 crc kubenswrapper[4814]: I0122 05:38:33.698024 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1fd8e88-42ab-43bb-8697-c7aebb8fec34-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.108935 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.109255 4814 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.146857 4814 util.go:48] "No 
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.146857 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-z4pzm"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.147970 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-z4pzm" event={"ID":"f1fd8e88-42ab-43bb-8697-c7aebb8fec34","Type":"ContainerDied","Data":"a57d7cf5e981fbba257a45eaac39e836cfe4ff0079bd5e7eef00c11a13f527c8"}
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.148017 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a57d7cf5e981fbba257a45eaac39e836cfe4ff0079bd5e7eef00c11a13f527c8"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.159749 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.161161 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0eedaf08-b691-48c7-a80e-428b4b22bf1d","Type":"ContainerDied","Data":"ddae2a572d6a31daae029a862815a47e278a4388145824827fe9c66f000848a2"}
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.161210 4814 scope.go:117] "RemoveContainer" containerID="780251ab63d35967e16e715f8b4ca01b8934bb84fbbbf88c92934165e6eb112b"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.205235 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 22 05:38:34 crc kubenswrapper[4814]: E0122 05:38:34.205669 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="ceilometer-central-agent"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.205685 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="ceilometer-central-agent"
Jan 22 05:38:34 crc kubenswrapper[4814]: E0122 05:38:34.205706 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="sg-core"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.205713 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="sg-core"
Jan 22 05:38:34 crc kubenswrapper[4814]: E0122 05:38:34.205726 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="proxy-httpd"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.205733 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="proxy-httpd"
Jan 22 05:38:34 crc kubenswrapper[4814]: E0122 05:38:34.205753 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="ceilometer-notification-agent"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.205759 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="ceilometer-notification-agent"
Jan 22 05:38:34 crc kubenswrapper[4814]: E0122 05:38:34.205767 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1fd8e88-42ab-43bb-8697-c7aebb8fec34" containerName="nova-cell0-conductor-db-sync"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.205773 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1fd8e88-42ab-43bb-8697-c7aebb8fec34" containerName="nova-cell0-conductor-db-sync"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.205938 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1fd8e88-42ab-43bb-8697-c7aebb8fec34" containerName="nova-cell0-conductor-db-sync"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.205952 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="ceilometer-notification-agent"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.205966 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="sg-core"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.205977 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="proxy-httpd"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.205992 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" containerName="ceilometer-central-agent"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.206592 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.213576 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-psbcp"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.213797 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.226292 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.245266 4814 scope.go:117] "RemoveContainer" containerID="b569b2b04ec3c8a76550b75a4fa83148edf9310ee809b327354b809b671c77be"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.245370 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.264372 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.282191 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.291540 4814 scope.go:117] "RemoveContainer" containerID="2140c65f026d9c712f279365c452005acd83d69727701122e7fae27ece2fafa4"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.304531 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.314535 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/701ff82d-a122-4606-a252-0554e0dd1cab-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"701ff82d-a122-4606-a252-0554e0dd1cab\") " pod="openstack/nova-cell0-conductor-0"
Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.314620 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45km4\" (UniqueName: \"kubernetes.io/projected/701ff82d-a122-4606-a252-0554e0dd1cab-kube-api-access-45km4\") pod \"nova-cell0-conductor-0\" (UID: \"701ff82d-a122-4606-a252-0554e0dd1cab\") " pod="openstack/nova-cell0-conductor-0"
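The paired E/I lines above show the cpu_manager and memory_manager dropping per-container resource assignments left behind by the two deleted pods before admitting the replacement pod. A toy sketch of that bookkeeping pattern, not kubelet's actual implementation:

```go
// Toy illustration of the RemoveStaleState pattern visible above:
// drop per-container assignments whose pod is no longer active.
// NOT kubelet's code; just the shape of the cleanup.
package main

import "fmt"

type key struct{ podUID, container string }

// assignments maps a (pod, container) pair to, e.g., a CPUSet string.
type stateMem struct{ assignments map[key]string }

func (s *stateMem) removeStale(activePods map[string]bool) {
	for k := range s.assignments { // deleting during range is safe in Go
		if !activePods[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container %q (pod %s)\n", k.container, k.podUID)
			delete(s.assignments, k) // the "Deleted CPUSet assignment" step
		}
	}
}

func main() {
	s := &stateMem{assignments: map[key]string{
		{"0eedaf08", "sg-core"}:              "0-3",
		{"701ff82d", "nova-cell0-conductor"}: "0-3",
	}}
	s.removeStale(map[string]bool{"701ff82d": true})
	fmt.Println(len(s.assignments), "assignment(s) left")
}
```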
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/701ff82d-a122-4606-a252-0554e0dd1cab-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"701ff82d-a122-4606-a252-0554e0dd1cab\") " pod="openstack/nova-cell0-conductor-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.322133 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.332061 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.332124 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.332297 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.390483 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0eedaf08-b691-48c7-a80e-428b4b22bf1d" path="/var/lib/kubelet/pods/0eedaf08-b691-48c7-a80e-428b4b22bf1d/volumes" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.395242 4814 scope.go:117] "RemoveContainer" containerID="eb063246da416c0a364dcdf4c34ffaa062ae9e4caa43bb2ae89c2fcb2117b440" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.416671 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-config-data\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.416715 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.416740 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/701ff82d-a122-4606-a252-0554e0dd1cab-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"701ff82d-a122-4606-a252-0554e0dd1cab\") " pod="openstack/nova-cell0-conductor-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.416769 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad45f3f-cafc-4429-9042-88395b7a31dc-run-httpd\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.416797 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzk4s\" (UniqueName: \"kubernetes.io/projected/2ad45f3f-cafc-4429-9042-88395b7a31dc-kube-api-access-bzk4s\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.416813 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad45f3f-cafc-4429-9042-88395b7a31dc-log-httpd\") pod \"ceilometer-0\" (UID: 
\"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.416860 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45km4\" (UniqueName: \"kubernetes.io/projected/701ff82d-a122-4606-a252-0554e0dd1cab-kube-api-access-45km4\") pod \"nova-cell0-conductor-0\" (UID: \"701ff82d-a122-4606-a252-0554e0dd1cab\") " pod="openstack/nova-cell0-conductor-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.416902 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/701ff82d-a122-4606-a252-0554e0dd1cab-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"701ff82d-a122-4606-a252-0554e0dd1cab\") " pod="openstack/nova-cell0-conductor-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.416920 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-scripts\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.416954 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.430095 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/701ff82d-a122-4606-a252-0554e0dd1cab-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"701ff82d-a122-4606-a252-0554e0dd1cab\") " pod="openstack/nova-cell0-conductor-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.432524 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45km4\" (UniqueName: \"kubernetes.io/projected/701ff82d-a122-4606-a252-0554e0dd1cab-kube-api-access-45km4\") pod \"nova-cell0-conductor-0\" (UID: \"701ff82d-a122-4606-a252-0554e0dd1cab\") " pod="openstack/nova-cell0-conductor-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.433672 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/701ff82d-a122-4606-a252-0554e0dd1cab-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"701ff82d-a122-4606-a252-0554e0dd1cab\") " pod="openstack/nova-cell0-conductor-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.518832 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-config-data\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.519082 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.519121 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/2ad45f3f-cafc-4429-9042-88395b7a31dc-run-httpd\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.519146 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzk4s\" (UniqueName: \"kubernetes.io/projected/2ad45f3f-cafc-4429-9042-88395b7a31dc-kube-api-access-bzk4s\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.519165 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad45f3f-cafc-4429-9042-88395b7a31dc-log-httpd\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.519215 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-scripts\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.519246 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.523321 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-config-data\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.528099 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad45f3f-cafc-4429-9042-88395b7a31dc-run-httpd\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.528362 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad45f3f-cafc-4429-9042-88395b7a31dc-log-httpd\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.535693 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-scripts\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.543467 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.544751 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.557733 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzk4s\" (UniqueName: \"kubernetes.io/projected/2ad45f3f-cafc-4429-9042-88395b7a31dc-kube-api-access-bzk4s\") pod \"ceilometer-0\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " pod="openstack/ceilometer-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.590964 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 22 05:38:34 crc kubenswrapper[4814]: I0122 05:38:34.653328 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:38:35 crc kubenswrapper[4814]: I0122 05:38:35.060094 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 22 05:38:35 crc kubenswrapper[4814]: W0122 05:38:35.071113 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod701ff82d_a122_4606_a252_0554e0dd1cab.slice/crio-194b3bbdb89d6cb41eda46cd15c2940dee3f9909a834a72ed09eb0ca02cc8064 WatchSource:0}: Error finding container 194b3bbdb89d6cb41eda46cd15c2940dee3f9909a834a72ed09eb0ca02cc8064: Status 404 returned error can't find the container with id 194b3bbdb89d6cb41eda46cd15c2940dee3f9909a834a72ed09eb0ca02cc8064 Jan 22 05:38:35 crc kubenswrapper[4814]: I0122 05:38:35.168573 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"701ff82d-a122-4606-a252-0554e0dd1cab","Type":"ContainerStarted","Data":"194b3bbdb89d6cb41eda46cd15c2940dee3f9909a834a72ed09eb0ca02cc8064"} Jan 22 05:38:35 crc kubenswrapper[4814]: I0122 05:38:35.168593 4814 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 05:38:35 crc kubenswrapper[4814]: I0122 05:38:35.168873 4814 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 22 05:38:35 crc kubenswrapper[4814]: W0122 05:38:35.193285 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ad45f3f_cafc_4429_9042_88395b7a31dc.slice/crio-3f53e0e5b3bae0e2831aaf1f21083c0bf3136809722055b95df344caefb7c93c WatchSource:0}: Error finding container 3f53e0e5b3bae0e2831aaf1f21083c0bf3136809722055b95df344caefb7c93c: Status 404 returned error can't find the container with id 3f53e0e5b3bae0e2831aaf1f21083c0bf3136809722055b95df344caefb7c93c Jan 22 05:38:35 crc kubenswrapper[4814]: I0122 05:38:35.193712 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:35 crc kubenswrapper[4814]: I0122 05:38:35.877696 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 05:38:35 crc kubenswrapper[4814]: I0122 05:38:35.887136 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 22 05:38:36 crc kubenswrapper[4814]: I0122 05:38:36.178815 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad45f3f-cafc-4429-9042-88395b7a31dc","Type":"ContainerStarted","Data":"45bb9c5f5a53c97384cfeece223983d0f2308a4f5a73461c32634753d4408bbc"} Jan 22 05:38:36 
Jan 22 05:38:36 crc kubenswrapper[4814]: I0122 05:38:36.179036 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad45f3f-cafc-4429-9042-88395b7a31dc","Type":"ContainerStarted","Data":"3f53e0e5b3bae0e2831aaf1f21083c0bf3136809722055b95df344caefb7c93c"}
Jan 22 05:38:36 crc kubenswrapper[4814]: I0122 05:38:36.181640 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"701ff82d-a122-4606-a252-0554e0dd1cab","Type":"ContainerStarted","Data":"6f0067a72b69f5eb6a4d85049cc969224a059ee7abcdc747aa4813adff51b6f1"}
Jan 22 05:38:36 crc kubenswrapper[4814]: I0122 05:38:36.181674 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Jan 22 05:38:36 crc kubenswrapper[4814]: I0122 05:38:36.200226 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.2002078210000002 podStartE2EDuration="2.200207821s" podCreationTimestamp="2026-01-22 05:38:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:38:36.197069173 +0000 UTC m=+1202.280557388" watchObservedRunningTime="2026-01-22 05:38:36.200207821 +0000 UTC m=+1202.283696036"
Jan 22 05:38:37 crc kubenswrapper[4814]: I0122 05:38:37.193916 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad45f3f-cafc-4429-9042-88395b7a31dc","Type":"ContainerStarted","Data":"f21b7eff2a3956caf4c2a0a6cb958c710306dd5a5c20a22cbe4ff535760707c4"}
Jan 22 05:38:38 crc kubenswrapper[4814]: I0122 05:38:38.203054 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad45f3f-cafc-4429-9042-88395b7a31dc","Type":"ContainerStarted","Data":"79c55fc92f3cf3e8a44aebca5e87aad9648cc1f5c0cc819755606cb7b64a462e"}
Jan 22 05:38:39 crc kubenswrapper[4814]: I0122 05:38:39.215134 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad45f3f-cafc-4429-9042-88395b7a31dc","Type":"ContainerStarted","Data":"ed1abdf6b3074568111fae9b779fcd44e390568ed77c4a19b28dec66ef0f8f69"}
Jan 22 05:38:39 crc kubenswrapper[4814]: I0122 05:38:39.215779 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 22 05:38:39 crc kubenswrapper[4814]: I0122 05:38:39.250050 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.5210149309999998 podStartE2EDuration="5.250033783s" podCreationTimestamp="2026-01-22 05:38:34 +0000 UTC" firstStartedPulling="2026-01-22 05:38:35.196514725 +0000 UTC m=+1201.280002940" lastFinishedPulling="2026-01-22 05:38:38.925533567 +0000 UTC m=+1205.009021792" observedRunningTime="2026-01-22 05:38:39.244681235 +0000 UTC m=+1205.328169450" watchObservedRunningTime="2026-01-22 05:38:39.250033783 +0000 UTC m=+1205.333521998"
Jan 22 05:38:44 crc kubenswrapper[4814]: I0122 05:38:44.616140 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Jan 22 05:38:44 crc kubenswrapper[4814]: I0122 05:38:44.732445 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
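The two pod_startup_latency_tracker lines decode cleanly: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and, consistent with the ceilometer-0 numbers, podStartSLOduration is the E2E duration minus the time spent pulling images (which is why nova-cell0-conductor-0, with zero-value pull timestamps, has SLO == E2E). Checking the arithmetic with the monotonic m=+ offsets from the log:

```go
// Verify the ceilometer-0 startup numbers from the tracker line above.
package main

import "fmt"

func main() {
	e2e := 5.250033783                      // podStartE2EDuration, seconds
	pull := 1205.009021792 - 1201.280002940 // lastFinishedPulling - firstStartedPulling (m=+ offsets)
	// Prints pull=3.729018852 slo=1.521014931, matching podStartSLOduration.
	fmt.Printf("pull=%.9f slo=%.9f\n", pull, e2e-pull)
}
```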
containerName="ceilometer-central-agent" containerID="cri-o://45bb9c5f5a53c97384cfeece223983d0f2308a4f5a73461c32634753d4408bbc" gracePeriod=30 Jan 22 05:38:44 crc kubenswrapper[4814]: I0122 05:38:44.732693 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="sg-core" containerID="cri-o://79c55fc92f3cf3e8a44aebca5e87aad9648cc1f5c0cc819755606cb7b64a462e" gracePeriod=30 Jan 22 05:38:44 crc kubenswrapper[4814]: I0122 05:38:44.732697 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="proxy-httpd" containerID="cri-o://ed1abdf6b3074568111fae9b779fcd44e390568ed77c4a19b28dec66ef0f8f69" gracePeriod=30 Jan 22 05:38:44 crc kubenswrapper[4814]: I0122 05:38:44.732770 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="ceilometer-notification-agent" containerID="cri-o://f21b7eff2a3956caf4c2a0a6cb958c710306dd5a5c20a22cbe4ff535760707c4" gracePeriod=30 Jan 22 05:38:44 crc kubenswrapper[4814]: E0122 05:38:44.940039 4814 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ad45f3f_cafc_4429_9042_88395b7a31dc.slice/crio-79c55fc92f3cf3e8a44aebca5e87aad9648cc1f5c0cc819755606cb7b64a462e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ad45f3f_cafc_4429_9042_88395b7a31dc.slice/crio-conmon-79c55fc92f3cf3e8a44aebca5e87aad9648cc1f5c0cc819755606cb7b64a462e.scope\": RecentStats: unable to find data in memory cache]" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.206051 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-57rkp"] Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.207285 4814 util.go:30] "No sandbox for pod can be found. 
Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.207285 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-57rkp"
Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.209922 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data"
Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.210088 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts"
Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.215374 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-57rkp"]
Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.317426 4814 generic.go:334] "Generic (PLEG): container finished" podID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerID="ed1abdf6b3074568111fae9b779fcd44e390568ed77c4a19b28dec66ef0f8f69" exitCode=0
Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.317456 4814 generic.go:334] "Generic (PLEG): container finished" podID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerID="79c55fc92f3cf3e8a44aebca5e87aad9648cc1f5c0cc819755606cb7b64a462e" exitCode=2
Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.317463 4814 generic.go:334] "Generic (PLEG): container finished" podID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerID="f21b7eff2a3956caf4c2a0a6cb958c710306dd5a5c20a22cbe4ff535760707c4" exitCode=0
Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.317472 4814 generic.go:334] "Generic (PLEG): container finished" podID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerID="45bb9c5f5a53c97384cfeece223983d0f2308a4f5a73461c32634753d4408bbc" exitCode=0
Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.317492 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad45f3f-cafc-4429-9042-88395b7a31dc","Type":"ContainerDied","Data":"ed1abdf6b3074568111fae9b779fcd44e390568ed77c4a19b28dec66ef0f8f69"}
Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.317517 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad45f3f-cafc-4429-9042-88395b7a31dc","Type":"ContainerDied","Data":"79c55fc92f3cf3e8a44aebca5e87aad9648cc1f5c0cc819755606cb7b64a462e"}
Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.317527 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad45f3f-cafc-4429-9042-88395b7a31dc","Type":"ContainerDied","Data":"f21b7eff2a3956caf4c2a0a6cb958c710306dd5a5c20a22cbe4ff535760707c4"}
Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.317535 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad45f3f-cafc-4429-9042-88395b7a31dc","Type":"ContainerDied","Data":"45bb9c5f5a53c97384cfeece223983d0f2308a4f5a73461c32634753d4408bbc"}
Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.351490 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-config-data\") pod \"nova-cell0-cell-mapping-57rkp\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " pod="openstack/nova-cell0-cell-mapping-57rkp"
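The "container finished" lines above report how each ceilometer-0 container responded to the graceful kill: three exited 0 (clean SIGTERM shutdown) and sg-core exited 2; a container that ignored SIGTERM for the full 30 s would instead typically be SIGKILLed and report 137. A tiny helper for reading such codes by the usual conventions (the meaning of a nonzero code is ultimately up to the container):

```go
// Conventional interpretation of container exit codes, as a rough guide.
package main

import "fmt"

func describe(code int) string {
	switch {
	case code == 0:
		return "clean exit (handled SIGTERM)"
	case code == 137:
		return "SIGKILLed (128+9), e.g. grace period expired or OOM"
	case code > 128:
		return fmt.Sprintf("killed by signal %d", code-128)
	default:
		return "application error exit"
	}
}

func main() {
	for _, c := range []int{0, 2, 0, 0} { // the ceilometer-0 exit codes above
		fmt.Println(c, "->", describe(c))
	}
}
```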
\"76b670b3-c66f-4b78-a355-951299de4283\") " pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.351597 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-scripts\") pod \"nova-cell0-cell-mapping-57rkp\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.351679 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khngk\" (UniqueName: \"kubernetes.io/projected/76b670b3-c66f-4b78-a355-951299de4283-kube-api-access-khngk\") pod \"nova-cell0-cell-mapping-57rkp\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.404906 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.406106 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.416368 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.416942 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.418494 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.432742 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.447564 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.456672 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khngk\" (UniqueName: \"kubernetes.io/projected/76b670b3-c66f-4b78-a355-951299de4283-kube-api-access-khngk\") pod \"nova-cell0-cell-mapping-57rkp\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.456773 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-config-data\") pod \"nova-cell0-cell-mapping-57rkp\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.456812 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-57rkp\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.456849 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-scripts\") pod \"nova-cell0-cell-mapping-57rkp\" (UID: 
\"76b670b3-c66f-4b78-a355-951299de4283\") " pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.470019 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-scripts\") pod \"nova-cell0-cell-mapping-57rkp\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.484232 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-57rkp\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.489093 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.496217 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-config-data\") pod \"nova-cell0-cell-mapping-57rkp\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.561525 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/557eb2ea-3709-4556-a2e2-03df0c6f955b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"557eb2ea-3709-4556-a2e2-03df0c6f955b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.561574 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-config-data\") pod \"nova-api-0\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.561661 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.561720 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-logs\") pod \"nova-api-0\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.561751 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/557eb2ea-3709-4556-a2e2-03df0c6f955b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"557eb2ea-3709-4556-a2e2-03df0c6f955b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.561808 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtjtd\" (UniqueName: \"kubernetes.io/projected/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-kube-api-access-jtjtd\") pod 
\"nova-api-0\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.561830 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncx8v\" (UniqueName: \"kubernetes.io/projected/557eb2ea-3709-4556-a2e2-03df0c6f955b-kube-api-access-ncx8v\") pod \"nova-cell1-novncproxy-0\" (UID: \"557eb2ea-3709-4556-a2e2-03df0c6f955b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.575722 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.576312 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khngk\" (UniqueName: \"kubernetes.io/projected/76b670b3-c66f-4b78-a355-951299de4283-kube-api-access-khngk\") pod \"nova-cell0-cell-mapping-57rkp\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.578003 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.579684 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.580899 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.590854 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.597137 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.597314 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.606239 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.617485 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682373 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-logs\") pod \"nova-api-0\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682440 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/557eb2ea-3709-4556-a2e2-03df0c6f955b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"557eb2ea-3709-4556-a2e2-03df0c6f955b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682478 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17c96d00-7f5e-4e37-b29e-408a90b6dece-config-data\") pod \"nova-scheduler-0\" (UID: \"17c96d00-7f5e-4e37-b29e-408a90b6dece\") " pod="openstack/nova-scheduler-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682502 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b67bb7d5-4901-451f-8509-f274e2d3124b-config-data\") pod \"nova-metadata-0\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682558 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtjtd\" (UniqueName: \"kubernetes.io/projected/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-kube-api-access-jtjtd\") pod \"nova-api-0\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682581 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncx8v\" (UniqueName: \"kubernetes.io/projected/557eb2ea-3709-4556-a2e2-03df0c6f955b-kube-api-access-ncx8v\") pod \"nova-cell1-novncproxy-0\" (UID: \"557eb2ea-3709-4556-a2e2-03df0c6f955b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682608 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b67bb7d5-4901-451f-8509-f274e2d3124b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682676 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17c96d00-7f5e-4e37-b29e-408a90b6dece-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"17c96d00-7f5e-4e37-b29e-408a90b6dece\") " pod="openstack/nova-scheduler-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682693 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/557eb2ea-3709-4556-a2e2-03df0c6f955b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"557eb2ea-3709-4556-a2e2-03df0c6f955b\") " 
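The recurring reflector lines ("Caches populated for *v1.Secret from object-...") are the kubelet's per-object secret watches warming up for each new pod. The same list-watch-cache machinery is exposed to ordinary clients through client-go shared informers; a sketch assuming client-go and a valid kubeconfig:

```go
// Sketch: a shared informer on Secrets in the "openstack" namespace,
// the client-go analogue of the reflector cache lines above.
package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	factory := informers.NewSharedInformerFactoryWithOptions(
		cs, 10*time.Minute, informers.WithNamespace("openstack"))
	inf := factory.Core().V1().Secrets().Informer()
	inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			fmt.Println("cached secret:", obj.(*corev1.Secret).Name)
		},
	})
	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	cache.WaitForCacheSync(stop, inf.HasSynced) // analogous to "Caches populated"
}
```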
pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682712 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-config-data\") pod \"nova-api-0\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682739 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7jcw\" (UniqueName: \"kubernetes.io/projected/17c96d00-7f5e-4e37-b29e-408a90b6dece-kube-api-access-l7jcw\") pod \"nova-scheduler-0\" (UID: \"17c96d00-7f5e-4e37-b29e-408a90b6dece\") " pod="openstack/nova-scheduler-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682767 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682785 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvrcn\" (UniqueName: \"kubernetes.io/projected/b67bb7d5-4901-451f-8509-f274e2d3124b-kube-api-access-rvrcn\") pod \"nova-metadata-0\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.682818 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b67bb7d5-4901-451f-8509-f274e2d3124b-logs\") pod \"nova-metadata-0\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.683275 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-logs\") pod \"nova-api-0\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.691262 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/557eb2ea-3709-4556-a2e2-03df0c6f955b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"557eb2ea-3709-4556-a2e2-03df0c6f955b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.696587 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.696953 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-config-data\") pod \"nova-api-0\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.721558 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncx8v\" (UniqueName: \"kubernetes.io/projected/557eb2ea-3709-4556-a2e2-03df0c6f955b-kube-api-access-ncx8v\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"557eb2ea-3709-4556-a2e2-03df0c6f955b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.727078 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtjtd\" (UniqueName: \"kubernetes.io/projected/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-kube-api-access-jtjtd\") pod \"nova-api-0\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.731362 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/557eb2ea-3709-4556-a2e2-03df0c6f955b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"557eb2ea-3709-4556-a2e2-03df0c6f955b\") " pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.738003 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.762310 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.785021 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b67bb7d5-4901-451f-8509-f274e2d3124b-config-data\") pod \"nova-metadata-0\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.785094 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b67bb7d5-4901-451f-8509-f274e2d3124b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.785142 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17c96d00-7f5e-4e37-b29e-408a90b6dece-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"17c96d00-7f5e-4e37-b29e-408a90b6dece\") " pod="openstack/nova-scheduler-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.785168 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7jcw\" (UniqueName: \"kubernetes.io/projected/17c96d00-7f5e-4e37-b29e-408a90b6dece-kube-api-access-l7jcw\") pod \"nova-scheduler-0\" (UID: \"17c96d00-7f5e-4e37-b29e-408a90b6dece\") " pod="openstack/nova-scheduler-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.785208 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvrcn\" (UniqueName: \"kubernetes.io/projected/b67bb7d5-4901-451f-8509-f274e2d3124b-kube-api-access-rvrcn\") pod \"nova-metadata-0\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.785238 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b67bb7d5-4901-451f-8509-f274e2d3124b-logs\") pod \"nova-metadata-0\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.785281 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/17c96d00-7f5e-4e37-b29e-408a90b6dece-config-data\") pod \"nova-scheduler-0\" (UID: \"17c96d00-7f5e-4e37-b29e-408a90b6dece\") " pod="openstack/nova-scheduler-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.793188 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b67bb7d5-4901-451f-8509-f274e2d3124b-logs\") pod \"nova-metadata-0\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.793614 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b67bb7d5-4901-451f-8509-f274e2d3124b-config-data\") pod \"nova-metadata-0\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.798933 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-8867v"] Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.800409 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.829920 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvrcn\" (UniqueName: \"kubernetes.io/projected/b67bb7d5-4901-451f-8509-f274e2d3124b-kube-api-access-rvrcn\") pod \"nova-metadata-0\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.833405 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17c96d00-7f5e-4e37-b29e-408a90b6dece-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"17c96d00-7f5e-4e37-b29e-408a90b6dece\") " pod="openstack/nova-scheduler-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.836170 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7jcw\" (UniqueName: \"kubernetes.io/projected/17c96d00-7f5e-4e37-b29e-408a90b6dece-kube-api-access-l7jcw\") pod \"nova-scheduler-0\" (UID: \"17c96d00-7f5e-4e37-b29e-408a90b6dece\") " pod="openstack/nova-scheduler-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.836902 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b67bb7d5-4901-451f-8509-f274e2d3124b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.836972 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17c96d00-7f5e-4e37-b29e-408a90b6dece-config-data\") pod \"nova-scheduler-0\" (UID: \"17c96d00-7f5e-4e37-b29e-408a90b6dece\") " pod="openstack/nova-scheduler-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.895061 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-8867v"] Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.896308 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-config\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " 
pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.896362 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gg74r\" (UniqueName: \"kubernetes.io/projected/a156fc49-5ab5-47b4-833d-a92705928a35-kube-api-access-gg74r\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.896456 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.896473 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.896490 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.896525 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.916412 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.947938 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.999316 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.999360 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.999380 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.999425 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.999470 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-config\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:45 crc kubenswrapper[4814]: I0122 05:38:45.999500 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gg74r\" (UniqueName: \"kubernetes.io/projected/a156fc49-5ab5-47b4-833d-a92705928a35-kube-api-access-gg74r\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.001018 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.001582 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.006967 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:46 crc 
Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.007091 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-config\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.007858 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.037986 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gg74r\" (UniqueName: \"kubernetes.io/projected/a156fc49-5ab5-47b4-833d-a92705928a35-kube-api-access-gg74r\") pod \"dnsmasq-dns-5fbc4d444f-8867v\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.141436 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.160311 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.310653 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzk4s\" (UniqueName: \"kubernetes.io/projected/2ad45f3f-cafc-4429-9042-88395b7a31dc-kube-api-access-bzk4s\") pod \"2ad45f3f-cafc-4429-9042-88395b7a31dc\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.310701 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-scripts\") pod \"2ad45f3f-cafc-4429-9042-88395b7a31dc\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.310738 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad45f3f-cafc-4429-9042-88395b7a31dc-run-httpd\") pod \"2ad45f3f-cafc-4429-9042-88395b7a31dc\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.310783 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-combined-ca-bundle\") pod \"2ad45f3f-cafc-4429-9042-88395b7a31dc\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.310829 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad45f3f-cafc-4429-9042-88395b7a31dc-log-httpd\") pod \"2ad45f3f-cafc-4429-9042-88395b7a31dc\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.310864 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-sg-core-conf-yaml\") pod \"2ad45f3f-cafc-4429-9042-88395b7a31dc\" (UID: 
\"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.310976 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-config-data\") pod \"2ad45f3f-cafc-4429-9042-88395b7a31dc\" (UID: \"2ad45f3f-cafc-4429-9042-88395b7a31dc\") " Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.313961 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ad45f3f-cafc-4429-9042-88395b7a31dc-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2ad45f3f-cafc-4429-9042-88395b7a31dc" (UID: "2ad45f3f-cafc-4429-9042-88395b7a31dc"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.313978 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ad45f3f-cafc-4429-9042-88395b7a31dc-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2ad45f3f-cafc-4429-9042-88395b7a31dc" (UID: "2ad45f3f-cafc-4429-9042-88395b7a31dc"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.314947 4814 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad45f3f-cafc-4429-9042-88395b7a31dc-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.314975 4814 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ad45f3f-cafc-4429-9042-88395b7a31dc-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.315290 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ad45f3f-cafc-4429-9042-88395b7a31dc-kube-api-access-bzk4s" (OuterVolumeSpecName: "kube-api-access-bzk4s") pod "2ad45f3f-cafc-4429-9042-88395b7a31dc" (UID: "2ad45f3f-cafc-4429-9042-88395b7a31dc"). InnerVolumeSpecName "kube-api-access-bzk4s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.356313 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-scripts" (OuterVolumeSpecName: "scripts") pod "2ad45f3f-cafc-4429-9042-88395b7a31dc" (UID: "2ad45f3f-cafc-4429-9042-88395b7a31dc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.370532 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ad45f3f-cafc-4429-9042-88395b7a31dc","Type":"ContainerDied","Data":"3f53e0e5b3bae0e2831aaf1f21083c0bf3136809722055b95df344caefb7c93c"} Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.370580 4814 scope.go:117] "RemoveContainer" containerID="ed1abdf6b3074568111fae9b779fcd44e390568ed77c4a19b28dec66ef0f8f69" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.370724 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.403773 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2ad45f3f-cafc-4429-9042-88395b7a31dc" (UID: "2ad45f3f-cafc-4429-9042-88395b7a31dc"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.412334 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-57rkp"] Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.416406 4814 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.416433 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzk4s\" (UniqueName: \"kubernetes.io/projected/2ad45f3f-cafc-4429-9042-88395b7a31dc-kube-api-access-bzk4s\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.416443 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.449561 4814 scope.go:117] "RemoveContainer" containerID="79c55fc92f3cf3e8a44aebca5e87aad9648cc1f5c0cc819755606cb7b64a462e" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.470001 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2ad45f3f-cafc-4429-9042-88395b7a31dc" (UID: "2ad45f3f-cafc-4429-9042-88395b7a31dc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.491443 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-config-data" (OuterVolumeSpecName: "config-data") pod "2ad45f3f-cafc-4429-9042-88395b7a31dc" (UID: "2ad45f3f-cafc-4429-9042-88395b7a31dc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.501053 4814 scope.go:117] "RemoveContainer" containerID="f21b7eff2a3956caf4c2a0a6cb958c710306dd5a5c20a22cbe4ff535760707c4" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.523943 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.523988 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad45f3f-cafc-4429-9042-88395b7a31dc-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.548883 4814 scope.go:117] "RemoveContainer" containerID="45bb9c5f5a53c97384cfeece223983d0f2308a4f5a73461c32634753d4408bbc" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.603912 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.690176 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.734385 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.749545 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.776790 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.783608 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:46 crc kubenswrapper[4814]: E0122 05:38:46.784049 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="ceilometer-central-agent" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.784064 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="ceilometer-central-agent" Jan 22 05:38:46 crc kubenswrapper[4814]: E0122 05:38:46.784085 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="sg-core" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.784091 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="sg-core" Jan 22 05:38:46 crc kubenswrapper[4814]: E0122 05:38:46.784103 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="proxy-httpd" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.784109 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="proxy-httpd" Jan 22 05:38:46 crc kubenswrapper[4814]: E0122 05:38:46.784121 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="ceilometer-notification-agent" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.784127 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="ceilometer-notification-agent" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.784296 4814 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="ceilometer-notification-agent" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.784308 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="ceilometer-central-agent" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.784319 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="sg-core" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.784340 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" containerName="proxy-httpd" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.786229 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.788878 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.791746 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.802181 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:46 crc kubenswrapper[4814]: W0122 05:38:46.858206 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb67bb7d5_4901_451f_8509_f274e2d3124b.slice/crio-9ea99c67d9125cff621f09e93c4d3e51e2ab218e9b7bda0329a71c4b83766e6c WatchSource:0}: Error finding container 9ea99c67d9125cff621f09e93c4d3e51e2ab218e9b7bda0329a71c4b83766e6c: Status 404 returned error can't find the container with id 9ea99c67d9125cff621f09e93c4d3e51e2ab218e9b7bda0329a71c4b83766e6c Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.865772 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.935003 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4lftp\" (UniqueName: \"kubernetes.io/projected/00d60555-cd25-4759-bcd0-ecd90c911a21-kube-api-access-4lftp\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.935062 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-scripts\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.935157 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-config-data\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.935223 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " 
pod="openstack/ceilometer-0" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.935286 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.935355 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00d60555-cd25-4759-bcd0-ecd90c911a21-log-httpd\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.935455 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00d60555-cd25-4759-bcd0-ecd90c911a21-run-httpd\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:46 crc kubenswrapper[4814]: I0122 05:38:46.946115 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-8867v"] Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.036722 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00d60555-cd25-4759-bcd0-ecd90c911a21-run-httpd\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.036796 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4lftp\" (UniqueName: \"kubernetes.io/projected/00d60555-cd25-4759-bcd0-ecd90c911a21-kube-api-access-4lftp\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.036818 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-scripts\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.036851 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-config-data\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.036888 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.036917 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.036946 4814 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00d60555-cd25-4759-bcd0-ecd90c911a21-log-httpd\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.037366 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00d60555-cd25-4759-bcd0-ecd90c911a21-log-httpd\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.037599 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00d60555-cd25-4759-bcd0-ecd90c911a21-run-httpd\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.043564 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.043656 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-config-data\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.044134 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.044900 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-scripts\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.052756 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4lftp\" (UniqueName: \"kubernetes.io/projected/00d60555-cd25-4759-bcd0-ecd90c911a21-kube-api-access-4lftp\") pod \"ceilometer-0\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") " pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.104079 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.122437 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-46576"] Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.123704 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.129322 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.129907 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.142964 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-46576"] Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.245082 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-scripts\") pod \"nova-cell1-conductor-db-sync-46576\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.245171 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-46576\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.245223 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6z46\" (UniqueName: \"kubernetes.io/projected/e7881f3e-36b2-4a90-85be-291e584e8e56-kube-api-access-v6z46\") pod \"nova-cell1-conductor-db-sync-46576\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.245246 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-config-data\") pod \"nova-cell1-conductor-db-sync-46576\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.356756 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-46576\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.356824 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6z46\" (UniqueName: \"kubernetes.io/projected/e7881f3e-36b2-4a90-85be-291e584e8e56-kube-api-access-v6z46\") pod \"nova-cell1-conductor-db-sync-46576\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.356851 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-config-data\") pod \"nova-cell1-conductor-db-sync-46576\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.356916 4814 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-scripts\") pod \"nova-cell1-conductor-db-sync-46576\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.365288 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-config-data\") pod \"nova-cell1-conductor-db-sync-46576\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.366332 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-46576\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.367008 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-scripts\") pod \"nova-cell1-conductor-db-sync-46576\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.390160 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6z46\" (UniqueName: \"kubernetes.io/projected/e7881f3e-36b2-4a90-85be-291e584e8e56-kube-api-access-v6z46\") pod \"nova-cell1-conductor-db-sync-46576\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.393663 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-57rkp" event={"ID":"76b670b3-c66f-4b78-a355-951299de4283","Type":"ContainerStarted","Data":"8822399f550af14f4cf7ad7419db2561540311632754834741591b345125c9ac"} Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.393704 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-57rkp" event={"ID":"76b670b3-c66f-4b78-a355-951299de4283","Type":"ContainerStarted","Data":"8a807ab2572df7812371ffb30c5531d746e79bab85c8d97959fddb07a976b6e1"} Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.397944 4814 generic.go:334] "Generic (PLEG): container finished" podID="a156fc49-5ab5-47b4-833d-a92705928a35" containerID="d0f9672ac5ac2db380dc9aac16c98076b696fb50073cec5e63bb229096c24724" exitCode=0 Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.399133 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" event={"ID":"a156fc49-5ab5-47b4-833d-a92705928a35","Type":"ContainerDied","Data":"d0f9672ac5ac2db380dc9aac16c98076b696fb50073cec5e63bb229096c24724"} Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.399247 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" event={"ID":"a156fc49-5ab5-47b4-833d-a92705928a35","Type":"ContainerStarted","Data":"f6ffd6c1450a996ef382aa841f08dc2dfb61757ba5d0a301f0400b1148309aa9"} Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.408930 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"557eb2ea-3709-4556-a2e2-03df0c6f955b","Type":"ContainerStarted","Data":"a39af41c08cfbcf97c40e4d9e2e1da1e5ba5c98ad6d656ee3b1e17914ee5a198"} Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.412006 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b67bb7d5-4901-451f-8509-f274e2d3124b","Type":"ContainerStarted","Data":"9ea99c67d9125cff621f09e93c4d3e51e2ab218e9b7bda0329a71c4b83766e6c"} Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.437365 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-57rkp" podStartSLOduration=2.437347782 podStartE2EDuration="2.437347782s" podCreationTimestamp="2026-01-22 05:38:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:38:47.407494295 +0000 UTC m=+1213.490982510" watchObservedRunningTime="2026-01-22 05:38:47.437347782 +0000 UTC m=+1213.520835997" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.446649 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.461394 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b","Type":"ContainerStarted","Data":"ebc745dbbf2f13a9b723f82271a80ca984ef48cb69738078df17f770d9322dab"} Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.469677 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"17c96d00-7f5e-4e37-b29e-408a90b6dece","Type":"ContainerStarted","Data":"4683c743a8fe660d1deee3eb883f702680ab9a5dcf0a7d30fd8f784ee749fed0"} Jan 22 05:38:47 crc kubenswrapper[4814]: I0122 05:38:47.758454 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:38:47 crc kubenswrapper[4814]: W0122 05:38:47.781796 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00d60555_cd25_4759_bcd0_ecd90c911a21.slice/crio-492e6eaa02c38ec111d5f1e8ad5738a0020a057eb420bd84bd87b3434e0c9f41 WatchSource:0}: Error finding container 492e6eaa02c38ec111d5f1e8ad5738a0020a057eb420bd84bd87b3434e0c9f41: Status 404 returned error can't find the container with id 492e6eaa02c38ec111d5f1e8ad5738a0020a057eb420bd84bd87b3434e0c9f41 Jan 22 05:38:48 crc kubenswrapper[4814]: I0122 05:38:48.099199 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-46576"] Jan 22 05:38:48 crc kubenswrapper[4814]: I0122 05:38:48.356763 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ad45f3f-cafc-4429-9042-88395b7a31dc" path="/var/lib/kubelet/pods/2ad45f3f-cafc-4429-9042-88395b7a31dc/volumes" Jan 22 05:38:48 crc kubenswrapper[4814]: I0122 05:38:48.491246 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" event={"ID":"a156fc49-5ab5-47b4-833d-a92705928a35","Type":"ContainerStarted","Data":"7c4fdf339839ded0807c29e105234db308fc053c4c275b341026b454d5f96478"} Jan 22 05:38:48 crc kubenswrapper[4814]: I0122 05:38:48.491944 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:48 crc kubenswrapper[4814]: I0122 05:38:48.495197 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"00d60555-cd25-4759-bcd0-ecd90c911a21","Type":"ContainerStarted","Data":"492e6eaa02c38ec111d5f1e8ad5738a0020a057eb420bd84bd87b3434e0c9f41"} Jan 22 05:38:48 crc kubenswrapper[4814]: I0122 05:38:48.496886 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-46576" event={"ID":"e7881f3e-36b2-4a90-85be-291e584e8e56","Type":"ContainerStarted","Data":"f8cd8b6a23fdcc669e4ac28d7c80894a0d4d6c75a2f654ec2106e72869d0288e"} Jan 22 05:38:48 crc kubenswrapper[4814]: I0122 05:38:48.496905 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-46576" event={"ID":"e7881f3e-36b2-4a90-85be-291e584e8e56","Type":"ContainerStarted","Data":"b6dbcb1974c7ac4c7fc7bfd875a02f6ce8235b425be877215e71090bbb402d1e"} Jan 22 05:38:48 crc kubenswrapper[4814]: I0122 05:38:48.534466 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" podStartSLOduration=3.534449719 podStartE2EDuration="3.534449719s" podCreationTimestamp="2026-01-22 05:38:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:38:48.517314441 +0000 UTC m=+1214.600802656" watchObservedRunningTime="2026-01-22 05:38:48.534449719 +0000 UTC m=+1214.617937934" Jan 22 05:38:48 crc kubenswrapper[4814]: I0122 05:38:48.542207 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-46576" podStartSLOduration=1.5421900320000002 podStartE2EDuration="1.542190032s" podCreationTimestamp="2026-01-22 05:38:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:38:48.533776798 +0000 UTC m=+1214.617265003" watchObservedRunningTime="2026-01-22 05:38:48.542190032 +0000 UTC m=+1214.625678247" Jan 22 05:38:49 crc kubenswrapper[4814]: I0122 05:38:49.350655 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:38:49 crc kubenswrapper[4814]: I0122 05:38:49.368243 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 22 05:38:49 crc kubenswrapper[4814]: I0122 05:38:49.505579 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00d60555-cd25-4759-bcd0-ecd90c911a21","Type":"ContainerStarted","Data":"3527f257aca92628f097c773589cfd401b4173cdbbe08dbbb7a7261c89691227"} Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.533290 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b67bb7d5-4901-451f-8509-f274e2d3124b","Type":"ContainerStarted","Data":"c9e18213160b6ff64567d2f0b984472b4da06239c71bf9a0b7c2aa8b1f8bfd0a"} Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.533340 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b67bb7d5-4901-451f-8509-f274e2d3124b" containerName="nova-metadata-log" containerID="cri-o://d96df5a750cdac9bc537a894c31db6553bd3a66088c31bb1a3877b190e6766ac" gracePeriod=30 Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.533385 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b67bb7d5-4901-451f-8509-f274e2d3124b" containerName="nova-metadata-metadata" 
containerID="cri-o://c9e18213160b6ff64567d2f0b984472b4da06239c71bf9a0b7c2aa8b1f8bfd0a" gracePeriod=30 Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.533787 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b67bb7d5-4901-451f-8509-f274e2d3124b","Type":"ContainerStarted","Data":"d96df5a750cdac9bc537a894c31db6553bd3a66088c31bb1a3877b190e6766ac"} Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.536035 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b","Type":"ContainerStarted","Data":"c67645cea3d5e633f48321794ae7379b6c7f0f0c2708a02a4302e72935436f06"} Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.536071 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b","Type":"ContainerStarted","Data":"d7588dce6cb9744e78dac77fdea6e1bea99c505eb3c8911e98ea257d0c2eb1b9"} Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.537887 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"17c96d00-7f5e-4e37-b29e-408a90b6dece","Type":"ContainerStarted","Data":"c74b2f9f63a6b177914d1411b576e3923bc616a3f9cbcebec8acccf3a37d70e8"} Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.540364 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00d60555-cd25-4759-bcd0-ecd90c911a21","Type":"ContainerStarted","Data":"5cb43b5eae661fb8ec60e75993d151d6c6eab71da0e99743efd11df83f784432"} Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.540387 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00d60555-cd25-4759-bcd0-ecd90c911a21","Type":"ContainerStarted","Data":"3070517ec1dd6e714aeb14c3c675244f7be2497bf2c1f627fa0a235bc2f65b5d"} Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.542480 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"557eb2ea-3709-4556-a2e2-03df0c6f955b","Type":"ContainerStarted","Data":"b70967ab8a1ee48ef17f25d184f47bc75bdf8409fccae9cc76043f0bbea26819"} Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.542590 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="557eb2ea-3709-4556-a2e2-03df0c6f955b" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://b70967ab8a1ee48ef17f25d184f47bc75bdf8409fccae9cc76043f0bbea26819" gracePeriod=30 Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.606324 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.118822623 podStartE2EDuration="7.606304693s" podCreationTimestamp="2026-01-22 05:38:45 +0000 UTC" firstStartedPulling="2026-01-22 05:38:46.71825622 +0000 UTC m=+1212.801744435" lastFinishedPulling="2026-01-22 05:38:51.20573829 +0000 UTC m=+1217.289226505" observedRunningTime="2026-01-22 05:38:52.605617961 +0000 UTC m=+1218.689106176" watchObservedRunningTime="2026-01-22 05:38:52.606304693 +0000 UTC m=+1218.689792908" Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.647060 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.304637946 podStartE2EDuration="7.647037421s" podCreationTimestamp="2026-01-22 05:38:45 +0000 UTC" firstStartedPulling="2026-01-22 05:38:46.861526817 +0000 UTC 
m=+1212.945015032" lastFinishedPulling="2026-01-22 05:38:51.203926292 +0000 UTC m=+1217.287414507" observedRunningTime="2026-01-22 05:38:52.586944095 +0000 UTC m=+1218.670432310" watchObservedRunningTime="2026-01-22 05:38:52.647037421 +0000 UTC m=+1218.730525626" Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.664664 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.194785507 podStartE2EDuration="7.664644854s" podCreationTimestamp="2026-01-22 05:38:45 +0000 UTC" firstStartedPulling="2026-01-22 05:38:46.734555661 +0000 UTC m=+1212.818043876" lastFinishedPulling="2026-01-22 05:38:51.204415008 +0000 UTC m=+1217.287903223" observedRunningTime="2026-01-22 05:38:52.633146435 +0000 UTC m=+1218.716634650" watchObservedRunningTime="2026-01-22 05:38:52.664644854 +0000 UTC m=+1218.748133069" Jan 22 05:38:52 crc kubenswrapper[4814]: I0122 05:38:52.679784 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.077414022 podStartE2EDuration="7.679769858s" podCreationTimestamp="2026-01-22 05:38:45 +0000 UTC" firstStartedPulling="2026-01-22 05:38:46.602506886 +0000 UTC m=+1212.685995101" lastFinishedPulling="2026-01-22 05:38:51.204862692 +0000 UTC m=+1217.288350937" observedRunningTime="2026-01-22 05:38:52.674168773 +0000 UTC m=+1218.757656988" watchObservedRunningTime="2026-01-22 05:38:52.679769858 +0000 UTC m=+1218.763258073" Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.553298 4814 generic.go:334] "Generic (PLEG): container finished" podID="b67bb7d5-4901-451f-8509-f274e2d3124b" containerID="c9e18213160b6ff64567d2f0b984472b4da06239c71bf9a0b7c2aa8b1f8bfd0a" exitCode=0 Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.553498 4814 generic.go:334] "Generic (PLEG): container finished" podID="b67bb7d5-4901-451f-8509-f274e2d3124b" containerID="d96df5a750cdac9bc537a894c31db6553bd3a66088c31bb1a3877b190e6766ac" exitCode=143 Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.553401 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b67bb7d5-4901-451f-8509-f274e2d3124b","Type":"ContainerDied","Data":"c9e18213160b6ff64567d2f0b984472b4da06239c71bf9a0b7c2aa8b1f8bfd0a"} Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.553582 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b67bb7d5-4901-451f-8509-f274e2d3124b","Type":"ContainerDied","Data":"d96df5a750cdac9bc537a894c31db6553bd3a66088c31bb1a3877b190e6766ac"} Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.553612 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b67bb7d5-4901-451f-8509-f274e2d3124b","Type":"ContainerDied","Data":"9ea99c67d9125cff621f09e93c4d3e51e2ab218e9b7bda0329a71c4b83766e6c"} Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.553633 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ea99c67d9125cff621f09e93c4d3e51e2ab218e9b7bda0329a71c4b83766e6c" Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.568279 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.654397 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b67bb7d5-4901-451f-8509-f274e2d3124b-combined-ca-bundle\") pod \"b67bb7d5-4901-451f-8509-f274e2d3124b\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.654935 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b67bb7d5-4901-451f-8509-f274e2d3124b-config-data\") pod \"b67bb7d5-4901-451f-8509-f274e2d3124b\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.655209 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b67bb7d5-4901-451f-8509-f274e2d3124b-logs\") pod \"b67bb7d5-4901-451f-8509-f274e2d3124b\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.655340 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvrcn\" (UniqueName: \"kubernetes.io/projected/b67bb7d5-4901-451f-8509-f274e2d3124b-kube-api-access-rvrcn\") pod \"b67bb7d5-4901-451f-8509-f274e2d3124b\" (UID: \"b67bb7d5-4901-451f-8509-f274e2d3124b\") " Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.655655 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b67bb7d5-4901-451f-8509-f274e2d3124b-logs" (OuterVolumeSpecName: "logs") pod "b67bb7d5-4901-451f-8509-f274e2d3124b" (UID: "b67bb7d5-4901-451f-8509-f274e2d3124b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.656367 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b67bb7d5-4901-451f-8509-f274e2d3124b-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.662975 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b67bb7d5-4901-451f-8509-f274e2d3124b-kube-api-access-rvrcn" (OuterVolumeSpecName: "kube-api-access-rvrcn") pod "b67bb7d5-4901-451f-8509-f274e2d3124b" (UID: "b67bb7d5-4901-451f-8509-f274e2d3124b"). InnerVolumeSpecName "kube-api-access-rvrcn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.686093 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b67bb7d5-4901-451f-8509-f274e2d3124b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b67bb7d5-4901-451f-8509-f274e2d3124b" (UID: "b67bb7d5-4901-451f-8509-f274e2d3124b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.687111 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b67bb7d5-4901-451f-8509-f274e2d3124b-config-data" (OuterVolumeSpecName: "config-data") pod "b67bb7d5-4901-451f-8509-f274e2d3124b" (UID: "b67bb7d5-4901-451f-8509-f274e2d3124b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.758798 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b67bb7d5-4901-451f-8509-f274e2d3124b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.758833 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b67bb7d5-4901-451f-8509-f274e2d3124b-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:53 crc kubenswrapper[4814]: I0122 05:38:53.758868 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvrcn\" (UniqueName: \"kubernetes.io/projected/b67bb7d5-4901-451f-8509-f274e2d3124b-kube-api-access-rvrcn\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.564834 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.564956 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00d60555-cd25-4759-bcd0-ecd90c911a21","Type":"ContainerStarted","Data":"626f98207176024c4d1bb1b2d76675e9858a5a17eef53d074f31af4e38abb248"} Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.566373 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.595005 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.963541458 podStartE2EDuration="8.594986386s" podCreationTimestamp="2026-01-22 05:38:46 +0000 UTC" firstStartedPulling="2026-01-22 05:38:47.789379192 +0000 UTC m=+1213.872867397" lastFinishedPulling="2026-01-22 05:38:53.42082411 +0000 UTC m=+1219.504312325" observedRunningTime="2026-01-22 05:38:54.589212395 +0000 UTC m=+1220.672700610" watchObservedRunningTime="2026-01-22 05:38:54.594986386 +0000 UTC m=+1220.678474601" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.630240 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.665216 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.679254 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:38:54 crc kubenswrapper[4814]: E0122 05:38:54.679748 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b67bb7d5-4901-451f-8509-f274e2d3124b" containerName="nova-metadata-log" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.679783 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="b67bb7d5-4901-451f-8509-f274e2d3124b" containerName="nova-metadata-log" Jan 22 05:38:54 crc kubenswrapper[4814]: E0122 05:38:54.679859 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b67bb7d5-4901-451f-8509-f274e2d3124b" containerName="nova-metadata-metadata" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.679876 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="b67bb7d5-4901-451f-8509-f274e2d3124b" containerName="nova-metadata-metadata" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.680042 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="b67bb7d5-4901-451f-8509-f274e2d3124b" 
containerName="nova-metadata-log" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.680070 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="b67bb7d5-4901-451f-8509-f274e2d3124b" containerName="nova-metadata-metadata" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.681029 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.683859 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.685343 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.698609 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.888133 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c301d8a-5a53-4a62-8fd7-ba9091e38504-logs\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.888565 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.888850 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cx244\" (UniqueName: \"kubernetes.io/projected/9c301d8a-5a53-4a62-8fd7-ba9091e38504-kube-api-access-cx244\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.889078 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.889190 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-config-data\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.990572 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cx244\" (UniqueName: \"kubernetes.io/projected/9c301d8a-5a53-4a62-8fd7-ba9091e38504-kube-api-access-cx244\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.990659 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " 
pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.990693 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-config-data\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.990734 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c301d8a-5a53-4a62-8fd7-ba9091e38504-logs\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.990796 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.991308 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c301d8a-5a53-4a62-8fd7-ba9091e38504-logs\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.995649 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.996396 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-config-data\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:54 crc kubenswrapper[4814]: I0122 05:38:54.998545 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:55 crc kubenswrapper[4814]: I0122 05:38:55.010613 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cx244\" (UniqueName: \"kubernetes.io/projected/9c301d8a-5a53-4a62-8fd7-ba9091e38504-kube-api-access-cx244\") pod \"nova-metadata-0\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") " pod="openstack/nova-metadata-0" Jan 22 05:38:55 crc kubenswrapper[4814]: I0122 05:38:55.306339 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 05:38:55 crc kubenswrapper[4814]: I0122 05:38:55.738246 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:38:55 crc kubenswrapper[4814]: I0122 05:38:55.763717 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 05:38:55 crc kubenswrapper[4814]: I0122 05:38:55.763753 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 05:38:55 crc kubenswrapper[4814]: I0122 05:38:55.779167 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:38:55 crc kubenswrapper[4814]: W0122 05:38:55.788760 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9c301d8a_5a53_4a62_8fd7_ba9091e38504.slice/crio-8d7b8cc8c2c6cca269405583776232768f71f7867a961314574f6cfd524a208c WatchSource:0}: Error finding container 8d7b8cc8c2c6cca269405583776232768f71f7867a961314574f6cfd524a208c: Status 404 returned error can't find the container with id 8d7b8cc8c2c6cca269405583776232768f71f7867a961314574f6cfd524a208c Jan 22 05:38:55 crc kubenswrapper[4814]: I0122 05:38:55.948689 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 22 05:38:55 crc kubenswrapper[4814]: I0122 05:38:55.948739 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.005131 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.143353 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.230782 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-ffwm5"] Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.231036 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" podUID="c435e1dd-d906-4003-94cd-e78a57e0ab26" containerName="dnsmasq-dns" containerID="cri-o://f86ec37b174f5629fedeb750d0c34c62dbd8e515b941b2b20d00be9d08c2ab0e" gracePeriod=10 Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.360159 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b67bb7d5-4901-451f-8509-f274e2d3124b" path="/var/lib/kubelet/pods/b67bb7d5-4901-451f-8509-f274e2d3124b/volumes" Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.590991 4814 generic.go:334] "Generic (PLEG): container finished" podID="76b670b3-c66f-4b78-a355-951299de4283" containerID="8822399f550af14f4cf7ad7419db2561540311632754834741591b345125c9ac" exitCode=0 Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.591041 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-57rkp" event={"ID":"76b670b3-c66f-4b78-a355-951299de4283","Type":"ContainerDied","Data":"8822399f550af14f4cf7ad7419db2561540311632754834741591b345125c9ac"} Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.603505 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"9c301d8a-5a53-4a62-8fd7-ba9091e38504","Type":"ContainerStarted","Data":"14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b"} Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.603529 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9c301d8a-5a53-4a62-8fd7-ba9091e38504","Type":"ContainerStarted","Data":"7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6"} Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.603540 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9c301d8a-5a53-4a62-8fd7-ba9091e38504","Type":"ContainerStarted","Data":"8d7b8cc8c2c6cca269405583776232768f71f7867a961314574f6cfd524a208c"} Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.612129 4814 generic.go:334] "Generic (PLEG): container finished" podID="c435e1dd-d906-4003-94cd-e78a57e0ab26" containerID="f86ec37b174f5629fedeb750d0c34c62dbd8e515b941b2b20d00be9d08c2ab0e" exitCode=0 Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.612273 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" event={"ID":"c435e1dd-d906-4003-94cd-e78a57e0ab26","Type":"ContainerDied","Data":"f86ec37b174f5629fedeb750d0c34c62dbd8e515b941b2b20d00be9d08c2ab0e"} Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.634794 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.634777245 podStartE2EDuration="2.634777245s" podCreationTimestamp="2026-01-22 05:38:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:38:56.627364621 +0000 UTC m=+1222.710852836" watchObservedRunningTime="2026-01-22 05:38:56.634777245 +0000 UTC m=+1222.718265460" Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.687091 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.844394 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.848475 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.201:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.848768 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.201:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.969269 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-ovsdbserver-nb\") pod \"c435e1dd-d906-4003-94cd-e78a57e0ab26\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.969457 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-ovsdbserver-sb\") pod \"c435e1dd-d906-4003-94cd-e78a57e0ab26\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.969493 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-swift-storage-0\") pod \"c435e1dd-d906-4003-94cd-e78a57e0ab26\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.969515 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-svc\") pod \"c435e1dd-d906-4003-94cd-e78a57e0ab26\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.969553 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cq9w\" (UniqueName: \"kubernetes.io/projected/c435e1dd-d906-4003-94cd-e78a57e0ab26-kube-api-access-7cq9w\") pod \"c435e1dd-d906-4003-94cd-e78a57e0ab26\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.969592 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-config\") pod \"c435e1dd-d906-4003-94cd-e78a57e0ab26\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " Jan 22 05:38:56 crc kubenswrapper[4814]: I0122 05:38:56.984287 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c435e1dd-d906-4003-94cd-e78a57e0ab26-kube-api-access-7cq9w" (OuterVolumeSpecName: "kube-api-access-7cq9w") pod "c435e1dd-d906-4003-94cd-e78a57e0ab26" (UID: "c435e1dd-d906-4003-94cd-e78a57e0ab26"). InnerVolumeSpecName "kube-api-access-7cq9w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.050610 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c435e1dd-d906-4003-94cd-e78a57e0ab26" (UID: "c435e1dd-d906-4003-94cd-e78a57e0ab26"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.055227 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-config" (OuterVolumeSpecName: "config") pod "c435e1dd-d906-4003-94cd-e78a57e0ab26" (UID: "c435e1dd-d906-4003-94cd-e78a57e0ab26"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.063420 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c435e1dd-d906-4003-94cd-e78a57e0ab26" (UID: "c435e1dd-d906-4003-94cd-e78a57e0ab26"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.075664 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c435e1dd-d906-4003-94cd-e78a57e0ab26" (UID: "c435e1dd-d906-4003-94cd-e78a57e0ab26"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.077511 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-svc\") pod \"c435e1dd-d906-4003-94cd-e78a57e0ab26\" (UID: \"c435e1dd-d906-4003-94cd-e78a57e0ab26\") " Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.078166 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cq9w\" (UniqueName: \"kubernetes.io/projected/c435e1dd-d906-4003-94cd-e78a57e0ab26-kube-api-access-7cq9w\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.078180 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.078189 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.078221 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:57 crc kubenswrapper[4814]: W0122 05:38:57.078272 4814 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/c435e1dd-d906-4003-94cd-e78a57e0ab26/volumes/kubernetes.io~configmap/dns-svc Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.078304 4814 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c435e1dd-d906-4003-94cd-e78a57e0ab26" (UID: "c435e1dd-d906-4003-94cd-e78a57e0ab26"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.097308 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c435e1dd-d906-4003-94cd-e78a57e0ab26" (UID: "c435e1dd-d906-4003-94cd-e78a57e0ab26"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.179799 4814 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.179825 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c435e1dd-d906-4003-94cd-e78a57e0ab26-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.621759 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.621874 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-ffwm5" event={"ID":"c435e1dd-d906-4003-94cd-e78a57e0ab26","Type":"ContainerDied","Data":"ff10da929050437b985e77412f8b20694a372a13f57c643a1623a35618c6a9cd"} Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.621998 4814 scope.go:117] "RemoveContainer" containerID="f86ec37b174f5629fedeb750d0c34c62dbd8e515b941b2b20d00be9d08c2ab0e" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.658573 4814 scope.go:117] "RemoveContainer" containerID="a8c607f3d73e4c18355c67a40bfc4e5356343247d60471747d4f5f31e2d89459" Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.659078 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-ffwm5"] Jan 22 05:38:57 crc kubenswrapper[4814]: I0122 05:38:57.673269 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-ffwm5"] Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.155282 4814 util.go:48] "No ready sandbox for pod can be found. 
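Note the pairing above: empty_dir.go warns "Unmount skipped because path does not exist" and the very next entry still reports UnmountVolume.TearDown succeeded. Teardown treats a missing mount path as already-unmounted rather than as a failure. A minimal, hedged sketch of that idempotent shape (illustrative Linux-only helper, not kubelet's volume code):

package main

import (
	"fmt"
	"os"
	"syscall"
)

// tearDown reports success when the path is already gone, mirroring
// the warning-then-success pair in the log.
func tearDown(path string) error {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		fmt.Printf("Warning: Unmount skipped because path does not exist: %s\n", path)
		return nil // already unmounted counts as success
	}
	return syscall.Unmount(path, 0)
}

func main() {
	fmt.Println("teardown:", tearDown("/var/lib/kubelet/pods/example-uid/volumes/demo")) // example-uid is hypothetical
}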
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.217535 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-combined-ca-bundle\") pod \"76b670b3-c66f-4b78-a355-951299de4283\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.217746 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-config-data\") pod \"76b670b3-c66f-4b78-a355-951299de4283\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.217942 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khngk\" (UniqueName: \"kubernetes.io/projected/76b670b3-c66f-4b78-a355-951299de4283-kube-api-access-khngk\") pod \"76b670b3-c66f-4b78-a355-951299de4283\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.218283 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-scripts\") pod \"76b670b3-c66f-4b78-a355-951299de4283\" (UID: \"76b670b3-c66f-4b78-a355-951299de4283\") " Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.252633 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-scripts" (OuterVolumeSpecName: "scripts") pod "76b670b3-c66f-4b78-a355-951299de4283" (UID: "76b670b3-c66f-4b78-a355-951299de4283"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.257096 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "76b670b3-c66f-4b78-a355-951299de4283" (UID: "76b670b3-c66f-4b78-a355-951299de4283"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.264829 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76b670b3-c66f-4b78-a355-951299de4283-kube-api-access-khngk" (OuterVolumeSpecName: "kube-api-access-khngk") pod "76b670b3-c66f-4b78-a355-951299de4283" (UID: "76b670b3-c66f-4b78-a355-951299de4283"). InnerVolumeSpecName "kube-api-access-khngk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.266158 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-config-data" (OuterVolumeSpecName: "config-data") pod "76b670b3-c66f-4b78-a355-951299de4283" (UID: "76b670b3-c66f-4b78-a355-951299de4283"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.322229 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.322258 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khngk\" (UniqueName: \"kubernetes.io/projected/76b670b3-c66f-4b78-a355-951299de4283-kube-api-access-khngk\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.322268 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.322277 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76b670b3-c66f-4b78-a355-951299de4283-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.353567 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c435e1dd-d906-4003-94cd-e78a57e0ab26" path="/var/lib/kubelet/pods/c435e1dd-d906-4003-94cd-e78a57e0ab26/volumes" Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.632609 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-57rkp" Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.632981 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-57rkp" event={"ID":"76b670b3-c66f-4b78-a355-951299de4283","Type":"ContainerDied","Data":"8a807ab2572df7812371ffb30c5531d746e79bab85c8d97959fddb07a976b6e1"} Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.633018 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a807ab2572df7812371ffb30c5531d746e79bab85c8d97959fddb07a976b6e1" Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.634935 4814 generic.go:334] "Generic (PLEG): container finished" podID="e7881f3e-36b2-4a90-85be-291e584e8e56" containerID="f8cd8b6a23fdcc669e4ac28d7c80894a0d4d6c75a2f654ec2106e72869d0288e" exitCode=0 Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.634961 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-46576" event={"ID":"e7881f3e-36b2-4a90-85be-291e584e8e56","Type":"ContainerDied","Data":"f8cd8b6a23fdcc669e4ac28d7c80894a0d4d6c75a2f654ec2106e72869d0288e"} Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.780693 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.781175 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="17c96d00-7f5e-4e37-b29e-408a90b6dece" containerName="nova-scheduler-scheduler" containerID="cri-o://c74b2f9f63a6b177914d1411b576e3923bc616a3f9cbcebec8acccf3a37d70e8" gracePeriod=30 Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.787878 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.788079 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" 
containerName="nova-api-log" containerID="cri-o://d7588dce6cb9744e78dac77fdea6e1bea99c505eb3c8911e98ea257d0c2eb1b9" gracePeriod=30 Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.788189 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" containerName="nova-api-api" containerID="cri-o://c67645cea3d5e633f48321794ae7379b6c7f0f0c2708a02a4302e72935436f06" gracePeriod=30 Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.819389 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.819597 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9c301d8a-5a53-4a62-8fd7-ba9091e38504" containerName="nova-metadata-log" containerID="cri-o://7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6" gracePeriod=30 Jan 22 05:38:58 crc kubenswrapper[4814]: I0122 05:38:58.819695 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9c301d8a-5a53-4a62-8fd7-ba9091e38504" containerName="nova-metadata-metadata" containerID="cri-o://14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b" gracePeriod=30 Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.637782 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.644717 4814 generic.go:334] "Generic (PLEG): container finished" podID="3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" containerID="d7588dce6cb9744e78dac77fdea6e1bea99c505eb3c8911e98ea257d0c2eb1b9" exitCode=143 Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.644798 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b","Type":"ContainerDied","Data":"d7588dce6cb9744e78dac77fdea6e1bea99c505eb3c8911e98ea257d0c2eb1b9"} Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.647791 4814 generic.go:334] "Generic (PLEG): container finished" podID="17c96d00-7f5e-4e37-b29e-408a90b6dece" containerID="c74b2f9f63a6b177914d1411b576e3923bc616a3f9cbcebec8acccf3a37d70e8" exitCode=0 Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.647838 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"17c96d00-7f5e-4e37-b29e-408a90b6dece","Type":"ContainerDied","Data":"c74b2f9f63a6b177914d1411b576e3923bc616a3f9cbcebec8acccf3a37d70e8"} Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.649752 4814 generic.go:334] "Generic (PLEG): container finished" podID="9c301d8a-5a53-4a62-8fd7-ba9091e38504" containerID="14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b" exitCode=0 Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.649790 4814 generic.go:334] "Generic (PLEG): container finished" podID="9c301d8a-5a53-4a62-8fd7-ba9091e38504" containerID="7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6" exitCode=143 Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.649823 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.649833 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9c301d8a-5a53-4a62-8fd7-ba9091e38504","Type":"ContainerDied","Data":"14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b"} Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.649881 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9c301d8a-5a53-4a62-8fd7-ba9091e38504","Type":"ContainerDied","Data":"7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6"} Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.649893 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9c301d8a-5a53-4a62-8fd7-ba9091e38504","Type":"ContainerDied","Data":"8d7b8cc8c2c6cca269405583776232768f71f7867a961314574f6cfd524a208c"} Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.649910 4814 scope.go:117] "RemoveContainer" containerID="14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b" Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.696371 4814 scope.go:117] "RemoveContainer" containerID="7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6" Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.732838 4814 scope.go:117] "RemoveContainer" containerID="14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b" Jan 22 05:38:59 crc kubenswrapper[4814]: E0122 05:38:59.735277 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b\": container with ID starting with 14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b not found: ID does not exist" containerID="14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b" Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.735311 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b"} err="failed to get container status \"14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b\": rpc error: code = NotFound desc = could not find container \"14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b\": container with ID starting with 14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b not found: ID does not exist" Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.735336 4814 scope.go:117] "RemoveContainer" containerID="7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6" Jan 22 05:38:59 crc kubenswrapper[4814]: E0122 05:38:59.735723 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6\": container with ID starting with 7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6 not found: ID does not exist" containerID="7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6" Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.735743 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6"} err="failed to get container status \"7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6\": rpc error: code = 
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.735757 4814 scope.go:117] "RemoveContainer" containerID="14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b"
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.735988 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b"} err="failed to get container status \"14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b\": rpc error: code = NotFound desc = could not find container \"14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b\": container with ID starting with 14d692f923bc481c5462346e8dfa0bc144d9fd5bb7924836b484bd0a41d3043b not found: ID does not exist"
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.736002 4814 scope.go:117] "RemoveContainer" containerID="7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6"
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.736270 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6"} err="failed to get container status \"7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6\": rpc error: code = NotFound desc = could not find container \"7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6\": container with ID starting with 7d35102740f9febda1b1f5acb0fae233fdb29470623d1b2ec0c9d5f2dd8086d6 not found: ID does not exist"
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.752527 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-nova-metadata-tls-certs\") pod \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") "
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.752596 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c301d8a-5a53-4a62-8fd7-ba9091e38504-logs\") pod \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") "
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.752636 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-config-data\") pod \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") "
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.752688 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-combined-ca-bundle\") pod \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") "
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.752812 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cx244\" (UniqueName: \"kubernetes.io/projected/9c301d8a-5a53-4a62-8fd7-ba9091e38504-kube-api-access-cx244\") pod \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\" (UID: \"9c301d8a-5a53-4a62-8fd7-ba9091e38504\") "
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.753131 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c301d8a-5a53-4a62-8fd7-ba9091e38504-logs" (OuterVolumeSpecName: "logs") pod "9c301d8a-5a53-4a62-8fd7-ba9091e38504" (UID: "9c301d8a-5a53-4a62-8fd7-ba9091e38504"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.776605 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c301d8a-5a53-4a62-8fd7-ba9091e38504-kube-api-access-cx244" (OuterVolumeSpecName: "kube-api-access-cx244") pod "9c301d8a-5a53-4a62-8fd7-ba9091e38504" (UID: "9c301d8a-5a53-4a62-8fd7-ba9091e38504"). InnerVolumeSpecName "kube-api-access-cx244". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.799267 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-config-data" (OuterVolumeSpecName: "config-data") pod "9c301d8a-5a53-4a62-8fd7-ba9091e38504" (UID: "9c301d8a-5a53-4a62-8fd7-ba9091e38504"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.825077 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9c301d8a-5a53-4a62-8fd7-ba9091e38504" (UID: "9c301d8a-5a53-4a62-8fd7-ba9091e38504"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.825734 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "9c301d8a-5a53-4a62-8fd7-ba9091e38504" (UID: "9c301d8a-5a53-4a62-8fd7-ba9091e38504"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.854741 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cx244\" (UniqueName: \"kubernetes.io/projected/9c301d8a-5a53-4a62-8fd7-ba9091e38504-kube-api-access-cx244\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.854767 4814 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.854776 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c301d8a-5a53-4a62-8fd7-ba9091e38504-logs\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.854786 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.854794 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c301d8a-5a53-4a62-8fd7-ba9091e38504-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:38:59 crc kubenswrapper[4814]: I0122 05:38:59.993991 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.005831 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.034835 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 05:39:00 crc kubenswrapper[4814]: E0122 05:39:00.035179 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76b670b3-c66f-4b78-a355-951299de4283" containerName="nova-manage"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.035192 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="76b670b3-c66f-4b78-a355-951299de4283" containerName="nova-manage"
Jan 22 05:39:00 crc kubenswrapper[4814]: E0122 05:39:00.035213 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c435e1dd-d906-4003-94cd-e78a57e0ab26" containerName="init"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.035219 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c435e1dd-d906-4003-94cd-e78a57e0ab26" containerName="init"
Jan 22 05:39:00 crc kubenswrapper[4814]: E0122 05:39:00.035233 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c301d8a-5a53-4a62-8fd7-ba9091e38504" containerName="nova-metadata-metadata"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.035238 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c301d8a-5a53-4a62-8fd7-ba9091e38504" containerName="nova-metadata-metadata"
Jan 22 05:39:00 crc kubenswrapper[4814]: E0122 05:39:00.035249 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c435e1dd-d906-4003-94cd-e78a57e0ab26" containerName="dnsmasq-dns"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.035255 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c435e1dd-d906-4003-94cd-e78a57e0ab26" containerName="dnsmasq-dns"
Jan 22 05:39:00 crc kubenswrapper[4814]: E0122 05:39:00.035268 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c301d8a-5a53-4a62-8fd7-ba9091e38504" containerName="nova-metadata-log"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.035273 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c301d8a-5a53-4a62-8fd7-ba9091e38504" containerName="nova-metadata-log"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.035453 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="76b670b3-c66f-4b78-a355-951299de4283" containerName="nova-manage"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.035473 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c301d8a-5a53-4a62-8fd7-ba9091e38504" containerName="nova-metadata-metadata"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.035487 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="c435e1dd-d906-4003-94cd-e78a57e0ab26" containerName="dnsmasq-dns"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.035498 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c301d8a-5a53-4a62-8fd7-ba9091e38504" containerName="nova-metadata-log"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.036433 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.039268 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.039537 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.044196 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.068807 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.163464 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-46576"
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.175331 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17c96d00-7f5e-4e37-b29e-408a90b6dece-combined-ca-bundle\") pod \"17c96d00-7f5e-4e37-b29e-408a90b6dece\" (UID: \"17c96d00-7f5e-4e37-b29e-408a90b6dece\") " Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.175477 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17c96d00-7f5e-4e37-b29e-408a90b6dece-config-data\") pod \"17c96d00-7f5e-4e37-b29e-408a90b6dece\" (UID: \"17c96d00-7f5e-4e37-b29e-408a90b6dece\") " Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.175549 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7jcw\" (UniqueName: \"kubernetes.io/projected/17c96d00-7f5e-4e37-b29e-408a90b6dece-kube-api-access-l7jcw\") pod \"17c96d00-7f5e-4e37-b29e-408a90b6dece\" (UID: \"17c96d00-7f5e-4e37-b29e-408a90b6dece\") " Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.175945 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nfrg\" (UniqueName: \"kubernetes.io/projected/d63a66f3-c438-4af3-8f8f-41278409d5a0-kube-api-access-7nfrg\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.176004 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.176044 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d63a66f3-c438-4af3-8f8f-41278409d5a0-logs\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.176092 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.176245 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-config-data\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.185753 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17c96d00-7f5e-4e37-b29e-408a90b6dece-kube-api-access-l7jcw" (OuterVolumeSpecName: "kube-api-access-l7jcw") pod "17c96d00-7f5e-4e37-b29e-408a90b6dece" (UID: "17c96d00-7f5e-4e37-b29e-408a90b6dece"). InnerVolumeSpecName "kube-api-access-l7jcw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.211743 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17c96d00-7f5e-4e37-b29e-408a90b6dece-config-data" (OuterVolumeSpecName: "config-data") pod "17c96d00-7f5e-4e37-b29e-408a90b6dece" (UID: "17c96d00-7f5e-4e37-b29e-408a90b6dece"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.218762 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17c96d00-7f5e-4e37-b29e-408a90b6dece-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "17c96d00-7f5e-4e37-b29e-408a90b6dece" (UID: "17c96d00-7f5e-4e37-b29e-408a90b6dece"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.277022 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6z46\" (UniqueName: \"kubernetes.io/projected/e7881f3e-36b2-4a90-85be-291e584e8e56-kube-api-access-v6z46\") pod \"e7881f3e-36b2-4a90-85be-291e584e8e56\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.277063 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-scripts\") pod \"e7881f3e-36b2-4a90-85be-291e584e8e56\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.277172 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-combined-ca-bundle\") pod \"e7881f3e-36b2-4a90-85be-291e584e8e56\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.277214 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-config-data\") pod \"e7881f3e-36b2-4a90-85be-291e584e8e56\" (UID: \"e7881f3e-36b2-4a90-85be-291e584e8e56\") " Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.277529 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.277591 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-config-data\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.277653 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nfrg\" (UniqueName: \"kubernetes.io/projected/d63a66f3-c438-4af3-8f8f-41278409d5a0-kube-api-access-7nfrg\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.277698 4814 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.277730 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d63a66f3-c438-4af3-8f8f-41278409d5a0-logs\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.277811 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17c96d00-7f5e-4e37-b29e-408a90b6dece-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.277822 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17c96d00-7f5e-4e37-b29e-408a90b6dece-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.277833 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7jcw\" (UniqueName: \"kubernetes.io/projected/17c96d00-7f5e-4e37-b29e-408a90b6dece-kube-api-access-l7jcw\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.278436 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d63a66f3-c438-4af3-8f8f-41278409d5a0-logs\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.279765 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7881f3e-36b2-4a90-85be-291e584e8e56-kube-api-access-v6z46" (OuterVolumeSpecName: "kube-api-access-v6z46") pod "e7881f3e-36b2-4a90-85be-291e584e8e56" (UID: "e7881f3e-36b2-4a90-85be-291e584e8e56"). InnerVolumeSpecName "kube-api-access-v6z46". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.282052 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.283243 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.283609 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-config-data\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.285088 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-scripts" (OuterVolumeSpecName: "scripts") pod "e7881f3e-36b2-4a90-85be-291e584e8e56" (UID: "e7881f3e-36b2-4a90-85be-291e584e8e56"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.295761 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nfrg\" (UniqueName: \"kubernetes.io/projected/d63a66f3-c438-4af3-8f8f-41278409d5a0-kube-api-access-7nfrg\") pod \"nova-metadata-0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.302271 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-config-data" (OuterVolumeSpecName: "config-data") pod "e7881f3e-36b2-4a90-85be-291e584e8e56" (UID: "e7881f3e-36b2-4a90-85be-291e584e8e56"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.305155 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e7881f3e-36b2-4a90-85be-291e584e8e56" (UID: "e7881f3e-36b2-4a90-85be-291e584e8e56"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.353473 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c301d8a-5a53-4a62-8fd7-ba9091e38504" path="/var/lib/kubelet/pods/9c301d8a-5a53-4a62-8fd7-ba9091e38504/volumes" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.379738 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.379798 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.379811 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6z46\" (UniqueName: \"kubernetes.io/projected/e7881f3e-36b2-4a90-85be-291e584e8e56-kube-api-access-v6z46\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.379839 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7881f3e-36b2-4a90-85be-291e584e8e56-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.392521 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.662515 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"17c96d00-7f5e-4e37-b29e-408a90b6dece","Type":"ContainerDied","Data":"4683c743a8fe660d1deee3eb883f702680ab9a5dcf0a7d30fd8f784ee749fed0"} Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.666691 4814 scope.go:117] "RemoveContainer" containerID="c74b2f9f63a6b177914d1411b576e3923bc616a3f9cbcebec8acccf3a37d70e8" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.666809 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.669934 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-46576" event={"ID":"e7881f3e-36b2-4a90-85be-291e584e8e56","Type":"ContainerDied","Data":"b6dbcb1974c7ac4c7fc7bfd875a02f6ce8235b425be877215e71090bbb402d1e"} Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.669973 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6dbcb1974c7ac4c7fc7bfd875a02f6ce8235b425be877215e71090bbb402d1e" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.670010 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-46576" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.705911 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.712031 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.793583 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:39:00 crc kubenswrapper[4814]: E0122 05:39:00.794738 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17c96d00-7f5e-4e37-b29e-408a90b6dece" containerName="nova-scheduler-scheduler" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.794756 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="17c96d00-7f5e-4e37-b29e-408a90b6dece" containerName="nova-scheduler-scheduler" Jan 22 05:39:00 crc kubenswrapper[4814]: E0122 05:39:00.794812 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7881f3e-36b2-4a90-85be-291e584e8e56" containerName="nova-cell1-conductor-db-sync" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.794819 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7881f3e-36b2-4a90-85be-291e584e8e56" containerName="nova-cell1-conductor-db-sync" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.799238 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="17c96d00-7f5e-4e37-b29e-408a90b6dece" containerName="nova-scheduler-scheduler" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.799270 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7881f3e-36b2-4a90-85be-291e584e8e56" containerName="nova-cell1-conductor-db-sync" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.799928 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.801901 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.811496 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.843822 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.845104 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.847913 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.852508 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.878117 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.890661 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mw85x\" (UniqueName: \"kubernetes.io/projected/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-kube-api-access-mw85x\") pod \"nova-scheduler-0\" (UID: \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.890714 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-config-data\") pod \"nova-scheduler-0\" (UID: \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.890768 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ccfddf0-f239-4489-ba89-e8f6b3f3b474-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"1ccfddf0-f239-4489-ba89-e8f6b3f3b474\") " pod="openstack/nova-cell1-conductor-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.890794 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ccfddf0-f239-4489-ba89-e8f6b3f3b474-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"1ccfddf0-f239-4489-ba89-e8f6b3f3b474\") " pod="openstack/nova-cell1-conductor-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.890924 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cjwn\" (UniqueName: \"kubernetes.io/projected/1ccfddf0-f239-4489-ba89-e8f6b3f3b474-kube-api-access-2cjwn\") pod \"nova-cell1-conductor-0\" (UID: \"1ccfddf0-f239-4489-ba89-e8f6b3f3b474\") " pod="openstack/nova-cell1-conductor-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.890969 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.992006 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mw85x\" (UniqueName: \"kubernetes.io/projected/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-kube-api-access-mw85x\") pod \"nova-scheduler-0\" (UID: \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.992284 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-config-data\") pod 
\"nova-scheduler-0\" (UID: \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.992349 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ccfddf0-f239-4489-ba89-e8f6b3f3b474-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"1ccfddf0-f239-4489-ba89-e8f6b3f3b474\") " pod="openstack/nova-cell1-conductor-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.992372 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ccfddf0-f239-4489-ba89-e8f6b3f3b474-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"1ccfddf0-f239-4489-ba89-e8f6b3f3b474\") " pod="openstack/nova-cell1-conductor-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.992424 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cjwn\" (UniqueName: \"kubernetes.io/projected/1ccfddf0-f239-4489-ba89-e8f6b3f3b474-kube-api-access-2cjwn\") pod \"nova-cell1-conductor-0\" (UID: \"1ccfddf0-f239-4489-ba89-e8f6b3f3b474\") " pod="openstack/nova-cell1-conductor-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.992445 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.997777 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:00 crc kubenswrapper[4814]: I0122 05:39:00.998323 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ccfddf0-f239-4489-ba89-e8f6b3f3b474-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"1ccfddf0-f239-4489-ba89-e8f6b3f3b474\") " pod="openstack/nova-cell1-conductor-0" Jan 22 05:39:01 crc kubenswrapper[4814]: I0122 05:39:00.999713 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ccfddf0-f239-4489-ba89-e8f6b3f3b474-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"1ccfddf0-f239-4489-ba89-e8f6b3f3b474\") " pod="openstack/nova-cell1-conductor-0" Jan 22 05:39:01 crc kubenswrapper[4814]: I0122 05:39:01.002049 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-config-data\") pod \"nova-scheduler-0\" (UID: \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:01 crc kubenswrapper[4814]: I0122 05:39:01.010233 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mw85x\" (UniqueName: \"kubernetes.io/projected/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-kube-api-access-mw85x\") pod \"nova-scheduler-0\" (UID: \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:01 crc kubenswrapper[4814]: I0122 05:39:01.012016 4814 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"kube-api-access-2cjwn\" (UniqueName: \"kubernetes.io/projected/1ccfddf0-f239-4489-ba89-e8f6b3f3b474-kube-api-access-2cjwn\") pod \"nova-cell1-conductor-0\" (UID: \"1ccfddf0-f239-4489-ba89-e8f6b3f3b474\") " pod="openstack/nova-cell1-conductor-0" Jan 22 05:39:01 crc kubenswrapper[4814]: I0122 05:39:01.123013 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 05:39:01 crc kubenswrapper[4814]: I0122 05:39:01.206092 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 22 05:39:01 crc kubenswrapper[4814]: I0122 05:39:01.602354 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:39:01 crc kubenswrapper[4814]: I0122 05:39:01.699295 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d63a66f3-c438-4af3-8f8f-41278409d5a0","Type":"ContainerStarted","Data":"bea11130c5b37c506075512ada453120f3780a1ff4989612aa80a1f856bfbdd4"} Jan 22 05:39:01 crc kubenswrapper[4814]: I0122 05:39:01.699335 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d63a66f3-c438-4af3-8f8f-41278409d5a0","Type":"ContainerStarted","Data":"79ae677039dc6a7a18f20054ed42a355632df462115a245c6529bc647867c09b"} Jan 22 05:39:01 crc kubenswrapper[4814]: I0122 05:39:01.699348 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d63a66f3-c438-4af3-8f8f-41278409d5a0","Type":"ContainerStarted","Data":"c8714d699716cfb677099fc739cc6ff700da45220209a57f915339da3b7469a5"} Jan 22 05:39:01 crc kubenswrapper[4814]: I0122 05:39:01.711413 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc","Type":"ContainerStarted","Data":"0ad48b24efe8386fb6eae88e68ea46cd1b9cd7370b389abf52abc3beb2fc4efe"} Jan 22 05:39:01 crc kubenswrapper[4814]: I0122 05:39:01.893922 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.893903146 podStartE2EDuration="2.893903146s" podCreationTimestamp="2026-01-22 05:38:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:39:01.721802893 +0000 UTC m=+1227.805291108" watchObservedRunningTime="2026-01-22 05:39:01.893903146 +0000 UTC m=+1227.977391361" Jan 22 05:39:01 crc kubenswrapper[4814]: I0122 05:39:01.901877 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 22 05:39:02 crc kubenswrapper[4814]: I0122 05:39:02.353409 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17c96d00-7f5e-4e37-b29e-408a90b6dece" path="/var/lib/kubelet/pods/17c96d00-7f5e-4e37-b29e-408a90b6dece/volumes" Jan 22 05:39:02 crc kubenswrapper[4814]: I0122 05:39:02.721149 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc","Type":"ContainerStarted","Data":"0d4b6e824297d0b97d8c87e870b6de15fcc1fb274ae68a8b5e2c4bcb2ab518ca"} Jan 22 05:39:02 crc kubenswrapper[4814]: I0122 05:39:02.725587 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"1ccfddf0-f239-4489-ba89-e8f6b3f3b474","Type":"ContainerStarted","Data":"71eb036835a250079b2173f4280bb8eb9365d677d22fc2f268d35cfcc6e8c70f"} Jan 22 
05:39:02 crc kubenswrapper[4814]: I0122 05:39:02.725619 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"1ccfddf0-f239-4489-ba89-e8f6b3f3b474","Type":"ContainerStarted","Data":"2e1d33373eaa9a0adde040c62dcdb4a6950dc331fa3ad87918d2b966f5ec31b1"} Jan 22 05:39:02 crc kubenswrapper[4814]: I0122 05:39:02.750447 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.750424851 podStartE2EDuration="2.750424851s" podCreationTimestamp="2026-01-22 05:39:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:39:02.739517119 +0000 UTC m=+1228.823005344" watchObservedRunningTime="2026-01-22 05:39:02.750424851 +0000 UTC m=+1228.833913076" Jan 22 05:39:02 crc kubenswrapper[4814]: I0122 05:39:02.764212 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.764191854 podStartE2EDuration="2.764191854s" podCreationTimestamp="2026-01-22 05:39:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:39:02.759749424 +0000 UTC m=+1228.843237639" watchObservedRunningTime="2026-01-22 05:39:02.764191854 +0000 UTC m=+1228.847680069" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.645344 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.737365 4814 generic.go:334] "Generic (PLEG): container finished" podID="3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" containerID="c67645cea3d5e633f48321794ae7379b6c7f0f0c2708a02a4302e72935436f06" exitCode=0 Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.737438 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b","Type":"ContainerDied","Data":"c67645cea3d5e633f48321794ae7379b6c7f0f0c2708a02a4302e72935436f06"} Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.737473 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.737499 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b","Type":"ContainerDied","Data":"ebc745dbbf2f13a9b723f82271a80ca984ef48cb69738078df17f770d9322dab"} Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.737526 4814 scope.go:117] "RemoveContainer" containerID="c67645cea3d5e633f48321794ae7379b6c7f0f0c2708a02a4302e72935436f06" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.737711 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.757570 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtjtd\" (UniqueName: \"kubernetes.io/projected/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-kube-api-access-jtjtd\") pod \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.757650 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-combined-ca-bundle\") pod \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.757770 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-config-data\") pod \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.757804 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-logs\") pod \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\" (UID: \"3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b\") " Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.762580 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-logs" (OuterVolumeSpecName: "logs") pod "3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" (UID: "3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.776898 4814 scope.go:117] "RemoveContainer" containerID="d7588dce6cb9744e78dac77fdea6e1bea99c505eb3c8911e98ea257d0c2eb1b9" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.782126 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-kube-api-access-jtjtd" (OuterVolumeSpecName: "kube-api-access-jtjtd") pod "3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" (UID: "3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b"). InnerVolumeSpecName "kube-api-access-jtjtd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.800941 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" (UID: "3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.807199 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-config-data" (OuterVolumeSpecName: "config-data") pod "3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" (UID: "3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.861913 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtjtd\" (UniqueName: \"kubernetes.io/projected/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-kube-api-access-jtjtd\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.861964 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.861981 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.861999 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.884902 4814 scope.go:117] "RemoveContainer" containerID="c67645cea3d5e633f48321794ae7379b6c7f0f0c2708a02a4302e72935436f06" Jan 22 05:39:03 crc kubenswrapper[4814]: E0122 05:39:03.885391 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c67645cea3d5e633f48321794ae7379b6c7f0f0c2708a02a4302e72935436f06\": container with ID starting with c67645cea3d5e633f48321794ae7379b6c7f0f0c2708a02a4302e72935436f06 not found: ID does not exist" containerID="c67645cea3d5e633f48321794ae7379b6c7f0f0c2708a02a4302e72935436f06" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.885441 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c67645cea3d5e633f48321794ae7379b6c7f0f0c2708a02a4302e72935436f06"} err="failed to get container status \"c67645cea3d5e633f48321794ae7379b6c7f0f0c2708a02a4302e72935436f06\": rpc error: code = NotFound desc = could not find container \"c67645cea3d5e633f48321794ae7379b6c7f0f0c2708a02a4302e72935436f06\": container with ID starting with c67645cea3d5e633f48321794ae7379b6c7f0f0c2708a02a4302e72935436f06 not found: ID does not exist" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.885507 4814 scope.go:117] "RemoveContainer" containerID="d7588dce6cb9744e78dac77fdea6e1bea99c505eb3c8911e98ea257d0c2eb1b9" Jan 22 05:39:03 crc kubenswrapper[4814]: E0122 05:39:03.885968 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7588dce6cb9744e78dac77fdea6e1bea99c505eb3c8911e98ea257d0c2eb1b9\": container with ID starting with d7588dce6cb9744e78dac77fdea6e1bea99c505eb3c8911e98ea257d0c2eb1b9 not found: ID does not exist" containerID="d7588dce6cb9744e78dac77fdea6e1bea99c505eb3c8911e98ea257d0c2eb1b9" Jan 22 05:39:03 crc kubenswrapper[4814]: I0122 05:39:03.886021 4814 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d7588dce6cb9744e78dac77fdea6e1bea99c505eb3c8911e98ea257d0c2eb1b9"} err="failed to get container status \"d7588dce6cb9744e78dac77fdea6e1bea99c505eb3c8911e98ea257d0c2eb1b9\": rpc error: code = NotFound desc = could not find container \"d7588dce6cb9744e78dac77fdea6e1bea99c505eb3c8911e98ea257d0c2eb1b9\": container with ID starting with d7588dce6cb9744e78dac77fdea6e1bea99c505eb3c8911e98ea257d0c2eb1b9 not found: ID does not exist" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.083406 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.110033 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.127383 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 05:39:04 crc kubenswrapper[4814]: E0122 05:39:04.128014 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" containerName="nova-api-log" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.128081 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" containerName="nova-api-log" Jan 22 05:39:04 crc kubenswrapper[4814]: E0122 05:39:04.128187 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" containerName="nova-api-api" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.128248 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" containerName="nova-api-api" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.128469 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" containerName="nova-api-log" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.128534 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" containerName="nova-api-api" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.129552 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.131373 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.163660 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.179644 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-logs\") pod \"nova-api-0\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") " pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.180540 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-config-data\") pod \"nova-api-0\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") " pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.180579 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmcjz\" (UniqueName: \"kubernetes.io/projected/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-kube-api-access-rmcjz\") pod \"nova-api-0\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") " pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.180660 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") " pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.281861 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-logs\") pod \"nova-api-0\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") " pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.281907 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-config-data\") pod \"nova-api-0\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") " pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.281953 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmcjz\" (UniqueName: \"kubernetes.io/projected/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-kube-api-access-rmcjz\") pod \"nova-api-0\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") " pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.282017 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") " pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.282412 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-logs\") pod \"nova-api-0\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") " 
pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.285733 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-config-data\") pod \"nova-api-0\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") " pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.286280 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") " pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.297656 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmcjz\" (UniqueName: \"kubernetes.io/projected/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-kube-api-access-rmcjz\") pod \"nova-api-0\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") " pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.362000 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b" path="/var/lib/kubelet/pods/3f2ee99e-c4db-459a-bfe8-60c4c85dfc2b/volumes" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.468109 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 05:39:04 crc kubenswrapper[4814]: I0122 05:39:04.999058 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:39:05 crc kubenswrapper[4814]: I0122 05:39:05.392526 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 05:39:05 crc kubenswrapper[4814]: I0122 05:39:05.393074 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 05:39:05 crc kubenswrapper[4814]: I0122 05:39:05.762876 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc3e186d-6bee-40a2-ac96-bc6a04d47ace","Type":"ContainerStarted","Data":"815d9ce6656b03b81dd0d4710987dcdd080336f79bc3386fc5c570dcff2cc272"} Jan 22 05:39:05 crc kubenswrapper[4814]: I0122 05:39:05.763291 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc3e186d-6bee-40a2-ac96-bc6a04d47ace","Type":"ContainerStarted","Data":"28f590a3f43c76e32cd9ff9650b494043d575bb3dd0ffb4e28936582526cd456"} Jan 22 05:39:05 crc kubenswrapper[4814]: I0122 05:39:05.763306 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc3e186d-6bee-40a2-ac96-bc6a04d47ace","Type":"ContainerStarted","Data":"e50fce7fe3bdb6a9b6727e16819ea5f9def8cc6ec7d18c68300907a35f29bca8"} Jan 22 05:39:05 crc kubenswrapper[4814]: I0122 05:39:05.783009 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=1.782988182 podStartE2EDuration="1.782988182s" podCreationTimestamp="2026-01-22 05:39:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:39:05.782752705 +0000 UTC m=+1231.866240930" watchObservedRunningTime="2026-01-22 05:39:05.782988182 +0000 UTC m=+1231.866476397" Jan 22 05:39:06 crc kubenswrapper[4814]: I0122 05:39:06.124326 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/nova-scheduler-0" Jan 22 05:39:10 crc kubenswrapper[4814]: I0122 05:39:10.393620 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 22 05:39:10 crc kubenswrapper[4814]: I0122 05:39:10.394660 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 22 05:39:11 crc kubenswrapper[4814]: I0122 05:39:11.132071 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 22 05:39:11 crc kubenswrapper[4814]: I0122 05:39:11.159211 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 22 05:39:11 crc kubenswrapper[4814]: I0122 05:39:11.235287 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 22 05:39:11 crc kubenswrapper[4814]: I0122 05:39:11.409738 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.208:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 05:39:11 crc kubenswrapper[4814]: I0122 05:39:11.409989 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.208:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 05:39:11 crc kubenswrapper[4814]: I0122 05:39:11.893267 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 22 05:39:14 crc kubenswrapper[4814]: I0122 05:39:14.469861 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 05:39:14 crc kubenswrapper[4814]: I0122 05:39:14.470249 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 05:39:15 crc kubenswrapper[4814]: I0122 05:39:15.510892 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cc3e186d-6bee-40a2-ac96-bc6a04d47ace" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.211:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 05:39:15 crc kubenswrapper[4814]: I0122 05:39:15.551931 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cc3e186d-6bee-40a2-ac96-bc6a04d47ace" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.211:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 05:39:17 crc kubenswrapper[4814]: I0122 05:39:17.110073 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 22 05:39:20 crc kubenswrapper[4814]: I0122 05:39:20.401772 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 22 05:39:20 crc kubenswrapper[4814]: I0122 05:39:20.409932 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 22 05:39:20 crc kubenswrapper[4814]: I0122 05:39:20.413880 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 22 
05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.036603 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.165312 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.165520 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="4ab6f947-7aad-4ca5-98d2-0803c62ed26d" containerName="kube-state-metrics" containerID="cri-o://773247c1a08f32153bc6b4e91d1f81b803bd9cfa5a8c8a5aaf5e9b66ad1e1193" gracePeriod=30 Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.616476 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.669882 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hslm\" (UniqueName: \"kubernetes.io/projected/4ab6f947-7aad-4ca5-98d2-0803c62ed26d-kube-api-access-8hslm\") pod \"4ab6f947-7aad-4ca5-98d2-0803c62ed26d\" (UID: \"4ab6f947-7aad-4ca5-98d2-0803c62ed26d\") " Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.685393 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ab6f947-7aad-4ca5-98d2-0803c62ed26d-kube-api-access-8hslm" (OuterVolumeSpecName: "kube-api-access-8hslm") pod "4ab6f947-7aad-4ca5-98d2-0803c62ed26d" (UID: "4ab6f947-7aad-4ca5-98d2-0803c62ed26d"). InnerVolumeSpecName "kube-api-access-8hslm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.772694 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hslm\" (UniqueName: \"kubernetes.io/projected/4ab6f947-7aad-4ca5-98d2-0803c62ed26d-kube-api-access-8hslm\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.950921 4814 generic.go:334] "Generic (PLEG): container finished" podID="4ab6f947-7aad-4ca5-98d2-0803c62ed26d" containerID="773247c1a08f32153bc6b4e91d1f81b803bd9cfa5a8c8a5aaf5e9b66ad1e1193" exitCode=2 Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.951007 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.951032 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4ab6f947-7aad-4ca5-98d2-0803c62ed26d","Type":"ContainerDied","Data":"773247c1a08f32153bc6b4e91d1f81b803bd9cfa5a8c8a5aaf5e9b66ad1e1193"} Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.951092 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"4ab6f947-7aad-4ca5-98d2-0803c62ed26d","Type":"ContainerDied","Data":"240782e9fd382f38c2a0195336ee82f980e7bc392bb01cfef6a58a68cca6ca6f"} Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.951111 4814 scope.go:117] "RemoveContainer" containerID="773247c1a08f32153bc6b4e91d1f81b803bd9cfa5a8c8a5aaf5e9b66ad1e1193" Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.980723 4814 scope.go:117] "RemoveContainer" containerID="773247c1a08f32153bc6b4e91d1f81b803bd9cfa5a8c8a5aaf5e9b66ad1e1193" Jan 22 05:39:21 crc kubenswrapper[4814]: E0122 05:39:21.981184 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"773247c1a08f32153bc6b4e91d1f81b803bd9cfa5a8c8a5aaf5e9b66ad1e1193\": container with ID starting with 773247c1a08f32153bc6b4e91d1f81b803bd9cfa5a8c8a5aaf5e9b66ad1e1193 not found: ID does not exist" containerID="773247c1a08f32153bc6b4e91d1f81b803bd9cfa5a8c8a5aaf5e9b66ad1e1193" Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.981224 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"773247c1a08f32153bc6b4e91d1f81b803bd9cfa5a8c8a5aaf5e9b66ad1e1193"} err="failed to get container status \"773247c1a08f32153bc6b4e91d1f81b803bd9cfa5a8c8a5aaf5e9b66ad1e1193\": rpc error: code = NotFound desc = could not find container \"773247c1a08f32153bc6b4e91d1f81b803bd9cfa5a8c8a5aaf5e9b66ad1e1193\": container with ID starting with 773247c1a08f32153bc6b4e91d1f81b803bd9cfa5a8c8a5aaf5e9b66ad1e1193 not found: ID does not exist" Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.986397 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 05:39:21 crc kubenswrapper[4814]: I0122 05:39:21.997093 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.016383 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 05:39:22 crc kubenswrapper[4814]: E0122 05:39:22.016844 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ab6f947-7aad-4ca5-98d2-0803c62ed26d" containerName="kube-state-metrics" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.016862 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ab6f947-7aad-4ca5-98d2-0803c62ed26d" containerName="kube-state-metrics" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.017036 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ab6f947-7aad-4ca5-98d2-0803c62ed26d" containerName="kube-state-metrics" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.017637 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.030474 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.030775 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.030931 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.077476 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dxvg\" (UniqueName: \"kubernetes.io/projected/243c1bef-90f9-4590-af2b-9974062c05d3-kube-api-access-6dxvg\") pod \"kube-state-metrics-0\" (UID: \"243c1bef-90f9-4590-af2b-9974062c05d3\") " pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.077560 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/243c1bef-90f9-4590-af2b-9974062c05d3-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"243c1bef-90f9-4590-af2b-9974062c05d3\") " pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.077708 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/243c1bef-90f9-4590-af2b-9974062c05d3-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"243c1bef-90f9-4590-af2b-9974062c05d3\") " pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.077728 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/243c1bef-90f9-4590-af2b-9974062c05d3-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"243c1bef-90f9-4590-af2b-9974062c05d3\") " pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.179212 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/243c1bef-90f9-4590-af2b-9974062c05d3-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"243c1bef-90f9-4590-af2b-9974062c05d3\") " pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.179392 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/243c1bef-90f9-4590-af2b-9974062c05d3-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"243c1bef-90f9-4590-af2b-9974062c05d3\") " pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.179423 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/243c1bef-90f9-4590-af2b-9974062c05d3-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"243c1bef-90f9-4590-af2b-9974062c05d3\") " pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.179463 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dxvg\" 
(UniqueName: \"kubernetes.io/projected/243c1bef-90f9-4590-af2b-9974062c05d3-kube-api-access-6dxvg\") pod \"kube-state-metrics-0\" (UID: \"243c1bef-90f9-4590-af2b-9974062c05d3\") " pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.182800 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/243c1bef-90f9-4590-af2b-9974062c05d3-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"243c1bef-90f9-4590-af2b-9974062c05d3\") " pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.182827 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/243c1bef-90f9-4590-af2b-9974062c05d3-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"243c1bef-90f9-4590-af2b-9974062c05d3\") " pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.184944 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/243c1bef-90f9-4590-af2b-9974062c05d3-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"243c1bef-90f9-4590-af2b-9974062c05d3\") " pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.195574 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dxvg\" (UniqueName: \"kubernetes.io/projected/243c1bef-90f9-4590-af2b-9974062c05d3-kube-api-access-6dxvg\") pod \"kube-state-metrics-0\" (UID: \"243c1bef-90f9-4590-af2b-9974062c05d3\") " pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.330970 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.353583 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ab6f947-7aad-4ca5-98d2-0803c62ed26d" path="/var/lib/kubelet/pods/4ab6f947-7aad-4ca5-98d2-0803c62ed26d/volumes" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.806065 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.887299 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.961553 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"243c1bef-90f9-4590-af2b-9974062c05d3","Type":"ContainerStarted","Data":"a22bc43b942fe5edec20cf5183069f4b35f9a17185c76a5f37c1dea3e8c1e41b"} Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.964410 4814 generic.go:334] "Generic (PLEG): container finished" podID="557eb2ea-3709-4556-a2e2-03df0c6f955b" containerID="b70967ab8a1ee48ef17f25d184f47bc75bdf8409fccae9cc76043f0bbea26819" exitCode=137 Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.964469 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"557eb2ea-3709-4556-a2e2-03df0c6f955b","Type":"ContainerDied","Data":"b70967ab8a1ee48ef17f25d184f47bc75bdf8409fccae9cc76043f0bbea26819"} Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.964487 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.964555 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"557eb2ea-3709-4556-a2e2-03df0c6f955b","Type":"ContainerDied","Data":"a39af41c08cfbcf97c40e4d9e2e1da1e5ba5c98ad6d656ee3b1e17914ee5a198"} Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.964574 4814 scope.go:117] "RemoveContainer" containerID="b70967ab8a1ee48ef17f25d184f47bc75bdf8409fccae9cc76043f0bbea26819" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.986652 4814 scope.go:117] "RemoveContainer" containerID="b70967ab8a1ee48ef17f25d184f47bc75bdf8409fccae9cc76043f0bbea26819" Jan 22 05:39:22 crc kubenswrapper[4814]: E0122 05:39:22.987148 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b70967ab8a1ee48ef17f25d184f47bc75bdf8409fccae9cc76043f0bbea26819\": container with ID starting with b70967ab8a1ee48ef17f25d184f47bc75bdf8409fccae9cc76043f0bbea26819 not found: ID does not exist" containerID="b70967ab8a1ee48ef17f25d184f47bc75bdf8409fccae9cc76043f0bbea26819" Jan 22 05:39:22 crc kubenswrapper[4814]: I0122 05:39:22.987184 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b70967ab8a1ee48ef17f25d184f47bc75bdf8409fccae9cc76043f0bbea26819"} err="failed to get container status \"b70967ab8a1ee48ef17f25d184f47bc75bdf8409fccae9cc76043f0bbea26819\": rpc error: code = NotFound desc = could not find container \"b70967ab8a1ee48ef17f25d184f47bc75bdf8409fccae9cc76043f0bbea26819\": container with ID starting with b70967ab8a1ee48ef17f25d184f47bc75bdf8409fccae9cc76043f0bbea26819 not found: ID does not exist" Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.028127 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/557eb2ea-3709-4556-a2e2-03df0c6f955b-config-data\") pod \"557eb2ea-3709-4556-a2e2-03df0c6f955b\" (UID: \"557eb2ea-3709-4556-a2e2-03df0c6f955b\") " Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.028413 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncx8v\" (UniqueName: \"kubernetes.io/projected/557eb2ea-3709-4556-a2e2-03df0c6f955b-kube-api-access-ncx8v\") pod \"557eb2ea-3709-4556-a2e2-03df0c6f955b\" (UID: \"557eb2ea-3709-4556-a2e2-03df0c6f955b\") " Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.028680 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/557eb2ea-3709-4556-a2e2-03df0c6f955b-combined-ca-bundle\") pod \"557eb2ea-3709-4556-a2e2-03df0c6f955b\" (UID: \"557eb2ea-3709-4556-a2e2-03df0c6f955b\") " Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.033990 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/557eb2ea-3709-4556-a2e2-03df0c6f955b-kube-api-access-ncx8v" (OuterVolumeSpecName: "kube-api-access-ncx8v") pod "557eb2ea-3709-4556-a2e2-03df0c6f955b" (UID: "557eb2ea-3709-4556-a2e2-03df0c6f955b"). InnerVolumeSpecName "kube-api-access-ncx8v". 
PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.053920 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/557eb2ea-3709-4556-a2e2-03df0c6f955b-config-data" (OuterVolumeSpecName: "config-data") pod "557eb2ea-3709-4556-a2e2-03df0c6f955b" (UID: "557eb2ea-3709-4556-a2e2-03df0c6f955b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.060159 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/557eb2ea-3709-4556-a2e2-03df0c6f955b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "557eb2ea-3709-4556-a2e2-03df0c6f955b" (UID: "557eb2ea-3709-4556-a2e2-03df0c6f955b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.131178 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/557eb2ea-3709-4556-a2e2-03df0c6f955b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.131211 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/557eb2ea-3709-4556-a2e2-03df0c6f955b-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.131221 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncx8v\" (UniqueName: \"kubernetes.io/projected/557eb2ea-3709-4556-a2e2-03df0c6f955b-kube-api-access-ncx8v\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.234399 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.234841 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="ceilometer-central-agent" containerID="cri-o://3527f257aca92628f097c773589cfd401b4173cdbbe08dbbb7a7261c89691227" gracePeriod=30
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.235041 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="proxy-httpd" containerID="cri-o://626f98207176024c4d1bb1b2d76675e9858a5a17eef53d074f31af4e38abb248" gracePeriod=30
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.235202 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="sg-core" containerID="cri-o://5cb43b5eae661fb8ec60e75993d151d6c6eab71da0e99743efd11df83f784432" gracePeriod=30
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.235225 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="ceilometer-notification-agent" containerID="cri-o://3070517ec1dd6e714aeb14c3c675244f7be2497bf2c1f627fa0a235bc2f65b5d" gracePeriod=30
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.531192 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.542240 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.554388 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 22 05:39:23 crc kubenswrapper[4814]: E0122 05:39:23.554804 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="557eb2ea-3709-4556-a2e2-03df0c6f955b" containerName="nova-cell1-novncproxy-novncproxy"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.554817 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="557eb2ea-3709-4556-a2e2-03df0c6f955b" containerName="nova-cell1-novncproxy-novncproxy"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.554984 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="557eb2ea-3709-4556-a2e2-03df0c6f955b" containerName="nova-cell1-novncproxy-novncproxy"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.555592 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.557590 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.559809 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.561746 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.577156 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.642737 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9z5g\" (UniqueName: \"kubernetes.io/projected/fee01c3d-8ced-4407-aebf-07a7db0a2f06-kube-api-access-b9z5g\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.643176 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fee01c3d-8ced-4407-aebf-07a7db0a2f06-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.643328 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/fee01c3d-8ced-4407-aebf-07a7db0a2f06-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.643549 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/fee01c3d-8ced-4407-aebf-07a7db0a2f06-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.643695 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fee01c3d-8ced-4407-aebf-07a7db0a2f06-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.745369 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fee01c3d-8ced-4407-aebf-07a7db0a2f06-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.745494 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9z5g\" (UniqueName: \"kubernetes.io/projected/fee01c3d-8ced-4407-aebf-07a7db0a2f06-kube-api-access-b9z5g\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.745525 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fee01c3d-8ced-4407-aebf-07a7db0a2f06-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.745544 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/fee01c3d-8ced-4407-aebf-07a7db0a2f06-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.745601 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/fee01c3d-8ced-4407-aebf-07a7db0a2f06-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.751168 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fee01c3d-8ced-4407-aebf-07a7db0a2f06-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.751251 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fee01c3d-8ced-4407-aebf-07a7db0a2f06-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.757041 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/fee01c3d-8ced-4407-aebf-07a7db0a2f06-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.765713 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9z5g\" (UniqueName: \"kubernetes.io/projected/fee01c3d-8ced-4407-aebf-07a7db0a2f06-kube-api-access-b9z5g\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.765960 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/fee01c3d-8ced-4407-aebf-07a7db0a2f06-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"fee01c3d-8ced-4407-aebf-07a7db0a2f06\") " pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.868689 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.986833 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"243c1bef-90f9-4590-af2b-9974062c05d3","Type":"ContainerStarted","Data":"5cdc7f56c1f68f0f566995b835693a8b83c5cdb3cbee3d031442bd00487d75f5"}
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.987265 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.992471 4814 generic.go:334] "Generic (PLEG): container finished" podID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerID="626f98207176024c4d1bb1b2d76675e9858a5a17eef53d074f31af4e38abb248" exitCode=0
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.992498 4814 generic.go:334] "Generic (PLEG): container finished" podID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerID="5cb43b5eae661fb8ec60e75993d151d6c6eab71da0e99743efd11df83f784432" exitCode=2
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.992506 4814 generic.go:334] "Generic (PLEG): container finished" podID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerID="3527f257aca92628f097c773589cfd401b4173cdbbe08dbbb7a7261c89691227" exitCode=0
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.992525 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00d60555-cd25-4759-bcd0-ecd90c911a21","Type":"ContainerDied","Data":"626f98207176024c4d1bb1b2d76675e9858a5a17eef53d074f31af4e38abb248"}
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.992545 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00d60555-cd25-4759-bcd0-ecd90c911a21","Type":"ContainerDied","Data":"5cb43b5eae661fb8ec60e75993d151d6c6eab71da0e99743efd11df83f784432"}
Jan 22 05:39:23 crc kubenswrapper[4814]: I0122 05:39:23.992555 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00d60555-cd25-4759-bcd0-ecd90c911a21","Type":"ContainerDied","Data":"3527f257aca92628f097c773589cfd401b4173cdbbe08dbbb7a7261c89691227"}
Jan 22 05:39:24 crc kubenswrapper[4814]: I0122 05:39:24.012594 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.632129738 podStartE2EDuration="3.01257351s" podCreationTimestamp="2026-01-22 05:39:21 +0000 UTC" firstStartedPulling="2026-01-22 05:39:22.829716101 +0000 UTC m=+1248.913204316" lastFinishedPulling="2026-01-22 05:39:23.210159873 +0000 UTC m=+1249.293648088" observedRunningTime="2026-01-22 05:39:24.0017389 +0000 UTC m=+1250.085227115" watchObservedRunningTime="2026-01-22 05:39:24.01257351 +0000 UTC m=+1250.096061725"
Jan 22 05:39:24 crc kubenswrapper[4814]: I0122 05:39:24.364126 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="557eb2ea-3709-4556-a2e2-03df0c6f955b" path="/var/lib/kubelet/pods/557eb2ea-3709-4556-a2e2-03df0c6f955b/volumes"
Jan 22 05:39:24 crc kubenswrapper[4814]: I0122 05:39:24.364979 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Jan 22 05:39:24 crc kubenswrapper[4814]: I0122 05:39:24.479478 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 22 05:39:24 crc kubenswrapper[4814]: I0122 05:39:24.480882 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 22 05:39:24 crc kubenswrapper[4814]: I0122 05:39:24.489443 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Jan 22 05:39:24 crc kubenswrapper[4814]: I0122 05:39:24.509965 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.003422 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"fee01c3d-8ced-4407-aebf-07a7db0a2f06","Type":"ContainerStarted","Data":"da3d669978e9e2f4f14ac6920bad54cc31bec07929d6049fcc4d3d2ed9efb3b3"}
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.003459 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.003473 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"fee01c3d-8ced-4407-aebf-07a7db0a2f06","Type":"ContainerStarted","Data":"7b10d19dcf928b898935ec57028363b7271c0a2ca2d065d96f50304cf8c49e1e"}
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.013178 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.031094 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.03107318 podStartE2EDuration="2.03107318s" podCreationTimestamp="2026-01-22 05:39:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:39:25.022720188 +0000 UTC m=+1251.106208413" watchObservedRunningTime="2026-01-22 05:39:25.03107318 +0000 UTC m=+1251.114561395"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.348402 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-79fbn"]
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.350419 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.368009 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-79fbn"]
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.489447 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.489499 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.489528 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.489579 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-config\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.489602 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcwfl\" (UniqueName: \"kubernetes.io/projected/b575a98c-ea17-4a9a-b796-d54720b31dfa-kube-api-access-dcwfl\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.489734 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.591045 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.591094 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.591127 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.591179 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-config\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.591208 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcwfl\" (UniqueName: \"kubernetes.io/projected/b575a98c-ea17-4a9a-b796-d54720b31dfa-kube-api-access-dcwfl\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.591245 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.593355 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.593911 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.594482 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.594534 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-config\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.595416 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.621281 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcwfl\" (UniqueName: \"kubernetes.io/projected/b575a98c-ea17-4a9a-b796-d54720b31dfa-kube-api-access-dcwfl\") pod \"dnsmasq-dns-79b5d74c8c-79fbn\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.675593 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.737969 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.800335 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-combined-ca-bundle\") pod \"00d60555-cd25-4759-bcd0-ecd90c911a21\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") "
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.800456 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00d60555-cd25-4759-bcd0-ecd90c911a21-run-httpd\") pod \"00d60555-cd25-4759-bcd0-ecd90c911a21\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") "
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.800478 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00d60555-cd25-4759-bcd0-ecd90c911a21-log-httpd\") pod \"00d60555-cd25-4759-bcd0-ecd90c911a21\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") "
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.800505 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4lftp\" (UniqueName: \"kubernetes.io/projected/00d60555-cd25-4759-bcd0-ecd90c911a21-kube-api-access-4lftp\") pod \"00d60555-cd25-4759-bcd0-ecd90c911a21\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") "
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.800586 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-config-data\") pod \"00d60555-cd25-4759-bcd0-ecd90c911a21\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") "
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.800607 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-scripts\") pod \"00d60555-cd25-4759-bcd0-ecd90c911a21\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") "
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.800721 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-sg-core-conf-yaml\") pod \"00d60555-cd25-4759-bcd0-ecd90c911a21\" (UID: \"00d60555-cd25-4759-bcd0-ecd90c911a21\") "
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.806810 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00d60555-cd25-4759-bcd0-ecd90c911a21-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "00d60555-cd25-4759-bcd0-ecd90c911a21" (UID: "00d60555-cd25-4759-bcd0-ecd90c911a21"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.807651 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00d60555-cd25-4759-bcd0-ecd90c911a21-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "00d60555-cd25-4759-bcd0-ecd90c911a21" (UID: "00d60555-cd25-4759-bcd0-ecd90c911a21"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.817831 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00d60555-cd25-4759-bcd0-ecd90c911a21-kube-api-access-4lftp" (OuterVolumeSpecName: "kube-api-access-4lftp") pod "00d60555-cd25-4759-bcd0-ecd90c911a21" (UID: "00d60555-cd25-4759-bcd0-ecd90c911a21"). InnerVolumeSpecName "kube-api-access-4lftp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.827500 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-scripts" (OuterVolumeSpecName: "scripts") pod "00d60555-cd25-4759-bcd0-ecd90c911a21" (UID: "00d60555-cd25-4759-bcd0-ecd90c911a21"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.864392 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "00d60555-cd25-4759-bcd0-ecd90c911a21" (UID: "00d60555-cd25-4759-bcd0-ecd90c911a21"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.902598 4814 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00d60555-cd25-4759-bcd0-ecd90c911a21-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.902840 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4lftp\" (UniqueName: \"kubernetes.io/projected/00d60555-cd25-4759-bcd0-ecd90c911a21-kube-api-access-4lftp\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.902937 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-scripts\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.903022 4814 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:25 crc kubenswrapper[4814]: I0122 05:39:25.903104 4814 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/00d60555-cd25-4759-bcd0-ecd90c911a21-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.021444 4814 generic.go:334] "Generic (PLEG): container finished" podID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerID="3070517ec1dd6e714aeb14c3c675244f7be2497bf2c1f627fa0a235bc2f65b5d" exitCode=0
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.021758 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00d60555-cd25-4759-bcd0-ecd90c911a21","Type":"ContainerDied","Data":"3070517ec1dd6e714aeb14c3c675244f7be2497bf2c1f627fa0a235bc2f65b5d"}
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.021815 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"00d60555-cd25-4759-bcd0-ecd90c911a21","Type":"ContainerDied","Data":"492e6eaa02c38ec111d5f1e8ad5738a0020a057eb420bd84bd87b3434e0c9f41"}
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.021834 4814 scope.go:117] "RemoveContainer" containerID="626f98207176024c4d1bb1b2d76675e9858a5a17eef53d074f31af4e38abb248"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.022029 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.029956 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "00d60555-cd25-4759-bcd0-ecd90c911a21" (UID: "00d60555-cd25-4759-bcd0-ecd90c911a21"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.101421 4814 scope.go:117] "RemoveContainer" containerID="5cb43b5eae661fb8ec60e75993d151d6c6eab71da0e99743efd11df83f784432"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.111913 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.155151 4814 scope.go:117] "RemoveContainer" containerID="3070517ec1dd6e714aeb14c3c675244f7be2497bf2c1f627fa0a235bc2f65b5d"
Jan 22 05:39:26 crc kubenswrapper[4814]: E0122 05:39:26.162315 4814 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/rpm-ostreed.service\": RecentStats: unable to find data in memory cache]"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.162885 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-config-data" (OuterVolumeSpecName: "config-data") pod "00d60555-cd25-4759-bcd0-ecd90c911a21" (UID: "00d60555-cd25-4759-bcd0-ecd90c911a21"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.193563 4814 scope.go:117] "RemoveContainer" containerID="3527f257aca92628f097c773589cfd401b4173cdbbe08dbbb7a7261c89691227"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.215736 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00d60555-cd25-4759-bcd0-ecd90c911a21-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.250441 4814 scope.go:117] "RemoveContainer" containerID="626f98207176024c4d1bb1b2d76675e9858a5a17eef53d074f31af4e38abb248"
Jan 22 05:39:26 crc kubenswrapper[4814]: E0122 05:39:26.250983 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"626f98207176024c4d1bb1b2d76675e9858a5a17eef53d074f31af4e38abb248\": container with ID starting with 626f98207176024c4d1bb1b2d76675e9858a5a17eef53d074f31af4e38abb248 not found: ID does not exist" containerID="626f98207176024c4d1bb1b2d76675e9858a5a17eef53d074f31af4e38abb248"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.251082 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"626f98207176024c4d1bb1b2d76675e9858a5a17eef53d074f31af4e38abb248"} err="failed to get container status \"626f98207176024c4d1bb1b2d76675e9858a5a17eef53d074f31af4e38abb248\": rpc error: code = NotFound desc = could not find container \"626f98207176024c4d1bb1b2d76675e9858a5a17eef53d074f31af4e38abb248\": container with ID starting with 626f98207176024c4d1bb1b2d76675e9858a5a17eef53d074f31af4e38abb248 not found: ID does not exist"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.251168 4814 scope.go:117] "RemoveContainer" containerID="5cb43b5eae661fb8ec60e75993d151d6c6eab71da0e99743efd11df83f784432"
Jan 22 05:39:26 crc kubenswrapper[4814]: E0122 05:39:26.251513 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cb43b5eae661fb8ec60e75993d151d6c6eab71da0e99743efd11df83f784432\": container with ID starting with 5cb43b5eae661fb8ec60e75993d151d6c6eab71da0e99743efd11df83f784432 not found: ID does not exist" containerID="5cb43b5eae661fb8ec60e75993d151d6c6eab71da0e99743efd11df83f784432"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.251595 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cb43b5eae661fb8ec60e75993d151d6c6eab71da0e99743efd11df83f784432"} err="failed to get container status \"5cb43b5eae661fb8ec60e75993d151d6c6eab71da0e99743efd11df83f784432\": rpc error: code = NotFound desc = could not find container \"5cb43b5eae661fb8ec60e75993d151d6c6eab71da0e99743efd11df83f784432\": container with ID starting with 5cb43b5eae661fb8ec60e75993d151d6c6eab71da0e99743efd11df83f784432 not found: ID does not exist"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.251685 4814 scope.go:117] "RemoveContainer" containerID="3070517ec1dd6e714aeb14c3c675244f7be2497bf2c1f627fa0a235bc2f65b5d"
Jan 22 05:39:26 crc kubenswrapper[4814]: E0122 05:39:26.252172 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3070517ec1dd6e714aeb14c3c675244f7be2497bf2c1f627fa0a235bc2f65b5d\": container with ID starting with 3070517ec1dd6e714aeb14c3c675244f7be2497bf2c1f627fa0a235bc2f65b5d not found: ID does not exist" containerID="3070517ec1dd6e714aeb14c3c675244f7be2497bf2c1f627fa0a235bc2f65b5d"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.252266 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3070517ec1dd6e714aeb14c3c675244f7be2497bf2c1f627fa0a235bc2f65b5d"} err="failed to get container status \"3070517ec1dd6e714aeb14c3c675244f7be2497bf2c1f627fa0a235bc2f65b5d\": rpc error: code = NotFound desc = could not find container \"3070517ec1dd6e714aeb14c3c675244f7be2497bf2c1f627fa0a235bc2f65b5d\": container with ID starting with 3070517ec1dd6e714aeb14c3c675244f7be2497bf2c1f627fa0a235bc2f65b5d not found: ID does not exist"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.252347 4814 scope.go:117] "RemoveContainer" containerID="3527f257aca92628f097c773589cfd401b4173cdbbe08dbbb7a7261c89691227"
Jan 22 05:39:26 crc kubenswrapper[4814]: E0122 05:39:26.252642 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3527f257aca92628f097c773589cfd401b4173cdbbe08dbbb7a7261c89691227\": container with ID starting with 3527f257aca92628f097c773589cfd401b4173cdbbe08dbbb7a7261c89691227 not found: ID does not exist" containerID="3527f257aca92628f097c773589cfd401b4173cdbbe08dbbb7a7261c89691227"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.252721 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3527f257aca92628f097c773589cfd401b4173cdbbe08dbbb7a7261c89691227"} err="failed to get container status \"3527f257aca92628f097c773589cfd401b4173cdbbe08dbbb7a7261c89691227\": rpc error: code = NotFound desc = could not find container \"3527f257aca92628f097c773589cfd401b4173cdbbe08dbbb7a7261c89691227\": container with ID starting with 3527f257aca92628f097c773589cfd401b4173cdbbe08dbbb7a7261c89691227 not found: ID does not exist"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.375733 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.387218 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.414068 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:39:26 crc kubenswrapper[4814]: E0122 05:39:26.414432 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="ceilometer-notification-agent"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.414443 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="ceilometer-notification-agent"
Jan 22 05:39:26 crc kubenswrapper[4814]: E0122 05:39:26.414457 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="ceilometer-central-agent"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.414462 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="ceilometer-central-agent"
Jan 22 05:39:26 crc kubenswrapper[4814]: E0122 05:39:26.414478 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="sg-core"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.414485 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="sg-core"
Jan 22 05:39:26 crc kubenswrapper[4814]: E0122 05:39:26.414516 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="proxy-httpd"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.414522 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="proxy-httpd"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.414755 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="ceilometer-notification-agent"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.414776 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="ceilometer-central-agent"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.414788 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="proxy-httpd"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.414809 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" containerName="sg-core"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.417214 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.425049 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.425408 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.436542 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.479419 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.492719 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-79fbn"]
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.529620 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-config-data\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.529830 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.529926 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.530013 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02698b26-7a8b-45de-a007-c7b1723365a9-run-httpd\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.530078 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.530166 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pt88f\" (UniqueName: \"kubernetes.io/projected/02698b26-7a8b-45de-a007-c7b1723365a9-kube-api-access-pt88f\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.530261 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02698b26-7a8b-45de-a007-c7b1723365a9-log-httpd\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.530362 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-scripts\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.631777 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.631913 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.632033 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02698b26-7a8b-45de-a007-c7b1723365a9-run-httpd\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.632126 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.632219 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pt88f\" (UniqueName: \"kubernetes.io/projected/02698b26-7a8b-45de-a007-c7b1723365a9-kube-api-access-pt88f\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.633220 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02698b26-7a8b-45de-a007-c7b1723365a9-log-httpd\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.633341 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-scripts\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.633443 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-config-data\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.636671 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02698b26-7a8b-45de-a007-c7b1723365a9-run-httpd\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.641031 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.641901 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-config-data\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.642130 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02698b26-7a8b-45de-a007-c7b1723365a9-log-httpd\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.650636 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.651274 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.651610 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-scripts\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.652373 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pt88f\" (UniqueName: \"kubernetes.io/projected/02698b26-7a8b-45de-a007-c7b1723365a9-kube-api-access-pt88f\") pod \"ceilometer-0\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " pod="openstack/ceilometer-0"
Jan 22 05:39:26 crc kubenswrapper[4814]: I0122 05:39:26.883329 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 22 05:39:27 crc kubenswrapper[4814]: I0122 05:39:27.032372 4814 generic.go:334] "Generic (PLEG): container finished" podID="b575a98c-ea17-4a9a-b796-d54720b31dfa" containerID="493b0c05d79200619d9adc7aa3582e162e8a72132d4ecd57c46088f6928be9b4" exitCode=0
Jan 22 05:39:27 crc kubenswrapper[4814]: I0122 05:39:27.032431 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn" event={"ID":"b575a98c-ea17-4a9a-b796-d54720b31dfa","Type":"ContainerDied","Data":"493b0c05d79200619d9adc7aa3582e162e8a72132d4ecd57c46088f6928be9b4"}
Jan 22 05:39:27 crc kubenswrapper[4814]: I0122 05:39:27.032488 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn" event={"ID":"b575a98c-ea17-4a9a-b796-d54720b31dfa","Type":"ContainerStarted","Data":"9d3b8a8c64899afe7090a7996942db6d26fa44a7db2f7fb0fc15a7b9fc642d9e"}
Jan 22 05:39:27 crc kubenswrapper[4814]: I0122 05:39:27.552199 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:39:28 crc kubenswrapper[4814]: I0122 05:39:28.009840 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 22 05:39:28 crc kubenswrapper[4814]: I0122 05:39:28.042687 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02698b26-7a8b-45de-a007-c7b1723365a9","Type":"ContainerStarted","Data":"c859826d5e6bed2ef3b2a7a7a8b56b3d336ba1701ac291a69b62e37974f94cd3"}
Jan 22 05:39:28 crc kubenswrapper[4814]: I0122 05:39:28.048372 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn" event={"ID":"b575a98c-ea17-4a9a-b796-d54720b31dfa","Type":"ContainerStarted","Data":"b59c5c88b85597c6d0ded98b237871ae9f80196b2071bef48b22795f20cfc06f"}
Jan 22 05:39:28 crc kubenswrapper[4814]: I0122 05:39:28.048498 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cc3e186d-6bee-40a2-ac96-bc6a04d47ace" containerName="nova-api-log" containerID="cri-o://28f590a3f43c76e32cd9ff9650b494043d575bb3dd0ffb4e28936582526cd456" gracePeriod=30
Jan 22 05:39:28 crc kubenswrapper[4814]: I0122 05:39:28.048578 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cc3e186d-6bee-40a2-ac96-bc6a04d47ace" containerName="nova-api-api" containerID="cri-o://815d9ce6656b03b81dd0d4710987dcdd080336f79bc3386fc5c570dcff2cc272" gracePeriod=30
Jan 22 05:39:28 crc kubenswrapper[4814]: I0122 05:39:28.081256 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn" podStartSLOduration=3.081235994 podStartE2EDuration="3.081235994s" podCreationTimestamp="2026-01-22 05:39:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:39:28.073480261 +0000 UTC m=+1254.156968476" watchObservedRunningTime="2026-01-22 05:39:28.081235994 +0000 UTC m=+1254.164724199"
Jan 22 05:39:28 crc kubenswrapper[4814]: I0122 05:39:28.354530 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00d60555-cd25-4759-bcd0-ecd90c911a21" path="/var/lib/kubelet/pods/00d60555-cd25-4759-bcd0-ecd90c911a21/volumes"
Jan 22 05:39:28 crc kubenswrapper[4814]: I0122 05:39:28.869137 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Jan 22 05:39:29 crc kubenswrapper[4814]: I0122 05:39:29.059090 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02698b26-7a8b-45de-a007-c7b1723365a9","Type":"ContainerStarted","Data":"7afeab68b9846fe7770ed1f2f0eb17fb6df47c3a70dd14f366491da0ffec8a6b"}
Jan 22 05:39:29 crc kubenswrapper[4814]: I0122 05:39:29.059130 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02698b26-7a8b-45de-a007-c7b1723365a9","Type":"ContainerStarted","Data":"89341074f93cd3943af2dfb70fb07dca71595708a898baacc724470574feb977"}
Jan 22 05:39:29 crc kubenswrapper[4814]: I0122 05:39:29.061019 4814 generic.go:334] "Generic (PLEG): container finished" podID="cc3e186d-6bee-40a2-ac96-bc6a04d47ace" containerID="28f590a3f43c76e32cd9ff9650b494043d575bb3dd0ffb4e28936582526cd456" exitCode=143
Jan 22 05:39:29 crc kubenswrapper[4814]: I0122 05:39:29.061073 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc3e186d-6bee-40a2-ac96-bc6a04d47ace","Type":"ContainerDied","Data":"28f590a3f43c76e32cd9ff9650b494043d575bb3dd0ffb4e28936582526cd456"}
Jan 22 05:39:29 crc kubenswrapper[4814]: I0122 05:39:29.061356 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn"
Jan 22 05:39:29 crc kubenswrapper[4814]: I0122 05:39:29.568872 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 22 05:39:30 crc kubenswrapper[4814]: I0122 05:39:30.069708 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02698b26-7a8b-45de-a007-c7b1723365a9","Type":"ContainerStarted","Data":"14fb4ccff34a0eb4348ee078c975f96a877ba897e8023c58fea24fcd1b6a82d8"}
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.079955 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02698b26-7a8b-45de-a007-c7b1723365a9","Type":"ContainerStarted","Data":"c949826cdc208911a83ff3f427db751faeeae6f9b8fd119312a90e2a6cb290d4"}
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.080103 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="ceilometer-central-agent" containerID="cri-o://89341074f93cd3943af2dfb70fb07dca71595708a898baacc724470574feb977" gracePeriod=30
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.080145 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="proxy-httpd" containerID="cri-o://c949826cdc208911a83ff3f427db751faeeae6f9b8fd119312a90e2a6cb290d4" gracePeriod=30
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.080194 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="ceilometer-notification-agent" containerID="cri-o://7afeab68b9846fe7770ed1f2f0eb17fb6df47c3a70dd14f366491da0ffec8a6b" gracePeriod=30
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.080154 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="sg-core" containerID="cri-o://14fb4ccff34a0eb4348ee078c975f96a877ba897e8023c58fea24fcd1b6a82d8" gracePeriod=30
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.080325 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.131570 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.086621003 podStartE2EDuration="5.131542831s" podCreationTimestamp="2026-01-22 05:39:26 +0000 UTC" firstStartedPulling="2026-01-22 05:39:27.613719849 +0000 UTC m=+1253.697208054" lastFinishedPulling="2026-01-22 05:39:30.658641667 +0000 UTC m=+1256.742129882" observedRunningTime="2026-01-22 05:39:31.104491992 +0000 UTC m=+1257.187980207" watchObservedRunningTime="2026-01-22 05:39:31.131542831 +0000 UTC m=+1257.215031056"
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.897109 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.937202 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-combined-ca-bundle\") pod \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") "
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.937379 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmcjz\" (UniqueName: \"kubernetes.io/projected/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-kube-api-access-rmcjz\") pod \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") "
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.937416 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-logs\") pod \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") "
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.937436 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-config-data\") pod \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\" (UID: \"cc3e186d-6bee-40a2-ac96-bc6a04d47ace\") "
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.938599 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-logs" (OuterVolumeSpecName: "logs") pod "cc3e186d-6bee-40a2-ac96-bc6a04d47ace" (UID: "cc3e186d-6bee-40a2-ac96-bc6a04d47ace"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.961105 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-kube-api-access-rmcjz" (OuterVolumeSpecName: "kube-api-access-rmcjz") pod "cc3e186d-6bee-40a2-ac96-bc6a04d47ace" (UID: "cc3e186d-6bee-40a2-ac96-bc6a04d47ace"). InnerVolumeSpecName "kube-api-access-rmcjz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 05:39:31 crc kubenswrapper[4814]: I0122 05:39:31.970818 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc3e186d-6bee-40a2-ac96-bc6a04d47ace" (UID: "cc3e186d-6bee-40a2-ac96-bc6a04d47ace"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.012692 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-config-data" (OuterVolumeSpecName: "config-data") pod "cc3e186d-6bee-40a2-ac96-bc6a04d47ace" (UID: "cc3e186d-6bee-40a2-ac96-bc6a04d47ace"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.039802 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmcjz\" (UniqueName: \"kubernetes.io/projected/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-kube-api-access-rmcjz\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.039833 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-logs\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.039843 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-config-data\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.039852 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc3e186d-6bee-40a2-ac96-bc6a04d47ace-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.090952 4814 generic.go:334] "Generic (PLEG): container finished" podID="02698b26-7a8b-45de-a007-c7b1723365a9" containerID="c949826cdc208911a83ff3f427db751faeeae6f9b8fd119312a90e2a6cb290d4" exitCode=0
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.090981 4814 generic.go:334] "Generic (PLEG): container finished" podID="02698b26-7a8b-45de-a007-c7b1723365a9" containerID="14fb4ccff34a0eb4348ee078c975f96a877ba897e8023c58fea24fcd1b6a82d8" exitCode=2
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.090993 4814 generic.go:334] "Generic (PLEG): container finished" podID="02698b26-7a8b-45de-a007-c7b1723365a9" containerID="7afeab68b9846fe7770ed1f2f0eb17fb6df47c3a70dd14f366491da0ffec8a6b" exitCode=0
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.091010 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02698b26-7a8b-45de-a007-c7b1723365a9","Type":"ContainerDied","Data":"c949826cdc208911a83ff3f427db751faeeae6f9b8fd119312a90e2a6cb290d4"}
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.091061 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02698b26-7a8b-45de-a007-c7b1723365a9","Type":"ContainerDied","Data":"14fb4ccff34a0eb4348ee078c975f96a877ba897e8023c58fea24fcd1b6a82d8"}
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.091081 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02698b26-7a8b-45de-a007-c7b1723365a9","Type":"ContainerDied","Data":"7afeab68b9846fe7770ed1f2f0eb17fb6df47c3a70dd14f366491da0ffec8a6b"}
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.093036 4814 generic.go:334] "Generic (PLEG): container finished" podID="cc3e186d-6bee-40a2-ac96-bc6a04d47ace" containerID="815d9ce6656b03b81dd0d4710987dcdd080336f79bc3386fc5c570dcff2cc272" exitCode=0
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.093075 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc3e186d-6bee-40a2-ac96-bc6a04d47ace","Type":"ContainerDied","Data":"815d9ce6656b03b81dd0d4710987dcdd080336f79bc3386fc5c570dcff2cc272"}
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.093102 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cc3e186d-6bee-40a2-ac96-bc6a04d47ace","Type":"ContainerDied","Data":"e50fce7fe3bdb6a9b6727e16819ea5f9def8cc6ec7d18c68300907a35f29bca8"}
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.093103 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.093117 4814 scope.go:117] "RemoveContainer" containerID="815d9ce6656b03b81dd0d4710987dcdd080336f79bc3386fc5c570dcff2cc272"
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.130342 4814 scope.go:117] "RemoveContainer" containerID="28f590a3f43c76e32cd9ff9650b494043d575bb3dd0ffb4e28936582526cd456"
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.156695 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.181255 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.183276 4814 scope.go:117] "RemoveContainer" containerID="815d9ce6656b03b81dd0d4710987dcdd080336f79bc3386fc5c570dcff2cc272"
Jan 22 05:39:32 crc kubenswrapper[4814]: E0122 05:39:32.184847 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"815d9ce6656b03b81dd0d4710987dcdd080336f79bc3386fc5c570dcff2cc272\": container with ID starting with 815d9ce6656b03b81dd0d4710987dcdd080336f79bc3386fc5c570dcff2cc272 not found: ID does not exist" containerID="815d9ce6656b03b81dd0d4710987dcdd080336f79bc3386fc5c570dcff2cc272"
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.184940 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"815d9ce6656b03b81dd0d4710987dcdd080336f79bc3386fc5c570dcff2cc272"} err="failed to get container status \"815d9ce6656b03b81dd0d4710987dcdd080336f79bc3386fc5c570dcff2cc272\": rpc error: code = NotFound desc = could not find container \"815d9ce6656b03b81dd0d4710987dcdd080336f79bc3386fc5c570dcff2cc272\": container with ID starting with 815d9ce6656b03b81dd0d4710987dcdd080336f79bc3386fc5c570dcff2cc272 not found: ID does not exist"
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.185030 4814 scope.go:117] "RemoveContainer" containerID="28f590a3f43c76e32cd9ff9650b494043d575bb3dd0ffb4e28936582526cd456"
Jan 22 05:39:32 crc kubenswrapper[4814]: E0122 05:39:32.185452 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28f590a3f43c76e32cd9ff9650b494043d575bb3dd0ffb4e28936582526cd456\": container with ID starting with 28f590a3f43c76e32cd9ff9650b494043d575bb3dd0ffb4e28936582526cd456 not found: ID does not exist" containerID="28f590a3f43c76e32cd9ff9650b494043d575bb3dd0ffb4e28936582526cd456"
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.185539 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28f590a3f43c76e32cd9ff9650b494043d575bb3dd0ffb4e28936582526cd456"} err="failed to get container status \"28f590a3f43c76e32cd9ff9650b494043d575bb3dd0ffb4e28936582526cd456\": rpc error: code = NotFound desc = could not find container \"28f590a3f43c76e32cd9ff9650b494043d575bb3dd0ffb4e28936582526cd456\": container with ID starting with 28f590a3f43c76e32cd9ff9650b494043d575bb3dd0ffb4e28936582526cd456 not found: ID does not exist"
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.193984 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 22 05:39:32 crc kubenswrapper[4814]: E0122 05:39:32.194352 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc3e186d-6bee-40a2-ac96-bc6a04d47ace" containerName="nova-api-log"
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.194365 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc3e186d-6bee-40a2-ac96-bc6a04d47ace" containerName="nova-api-log"
Jan 22 05:39:32 crc kubenswrapper[4814]: E0122 05:39:32.194375 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc3e186d-6bee-40a2-ac96-bc6a04d47ace" containerName="nova-api-api"
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.194381 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc3e186d-6bee-40a2-ac96-bc6a04d47ace" containerName="nova-api-api"
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.194541 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc3e186d-6bee-40a2-ac96-bc6a04d47ace" containerName="nova-api-log"
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.194565 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc3e186d-6bee-40a2-ac96-bc6a04d47ace" containerName="nova-api-api"
Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.195463 4814 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.199076 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.199335 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.199524 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.203687 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.249763 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-public-tls-certs\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.249809 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czgmm\" (UniqueName: \"kubernetes.io/projected/725f36e1-6a7d-452b-bb96-0d8f18841546-kube-api-access-czgmm\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.249859 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-internal-tls-certs\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.249916 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-config-data\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.249935 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.249957 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/725f36e1-6a7d-452b-bb96-0d8f18841546-logs\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.341768 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.352530 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-internal-tls-certs\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.352616 4814 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-config-data\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.352679 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.352707 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/725f36e1-6a7d-452b-bb96-0d8f18841546-logs\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.352771 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-public-tls-certs\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.352793 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czgmm\" (UniqueName: \"kubernetes.io/projected/725f36e1-6a7d-452b-bb96-0d8f18841546-kube-api-access-czgmm\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.353276 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/725f36e1-6a7d-452b-bb96-0d8f18841546-logs\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.354294 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc3e186d-6bee-40a2-ac96-bc6a04d47ace" path="/var/lib/kubelet/pods/cc3e186d-6bee-40a2-ac96-bc6a04d47ace/volumes" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.358185 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-internal-tls-certs\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.361871 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-public-tls-certs\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.362154 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-config-data\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.365110 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.376796 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czgmm\" (UniqueName: \"kubernetes.io/projected/725f36e1-6a7d-452b-bb96-0d8f18841546-kube-api-access-czgmm\") pod \"nova-api-0\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " pod="openstack/nova-api-0" Jan 22 05:39:32 crc kubenswrapper[4814]: I0122 05:39:32.514262 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 05:39:33 crc kubenswrapper[4814]: I0122 05:39:33.004619 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:39:33 crc kubenswrapper[4814]: I0122 05:39:33.105325 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"725f36e1-6a7d-452b-bb96-0d8f18841546","Type":"ContainerStarted","Data":"0b2d48770741daa3c3d97b6e56074f8cf5adf26a6c76419d47d33319c3e86af9"} Jan 22 05:39:33 crc kubenswrapper[4814]: I0122 05:39:33.869490 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:39:33 crc kubenswrapper[4814]: I0122 05:39:33.885829 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.165258 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"725f36e1-6a7d-452b-bb96-0d8f18841546","Type":"ContainerStarted","Data":"26dbc5f9cc31a682231fc1cd1937469cbf05a324720be13945a4eb88ea09f0fa"} Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.165575 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"725f36e1-6a7d-452b-bb96-0d8f18841546","Type":"ContainerStarted","Data":"a317f0fc6065690b1500e088e81a5683312fea0a30967ff2a4143434f1ac7723"} Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.208433 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.208419143 podStartE2EDuration="2.208419143s" podCreationTimestamp="2026-01-22 05:39:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:39:34.201189486 +0000 UTC m=+1260.284677701" watchObservedRunningTime="2026-01-22 05:39:34.208419143 +0000 UTC m=+1260.291907358" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.240939 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.531684 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-b6chm"] Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.533036 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.536707 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.536886 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.561707 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-b6chm"] Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.619777 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-b6chm\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.620043 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snblv\" (UniqueName: \"kubernetes.io/projected/79b7af13-1acf-421b-914f-2f8fd797cbe3-kube-api-access-snblv\") pod \"nova-cell1-cell-mapping-b6chm\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.620157 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-scripts\") pod \"nova-cell1-cell-mapping-b6chm\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.620248 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-config-data\") pod \"nova-cell1-cell-mapping-b6chm\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.721752 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-scripts\") pod \"nova-cell1-cell-mapping-b6chm\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.721815 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-config-data\") pod \"nova-cell1-cell-mapping-b6chm\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.721896 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-b6chm\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.721941 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snblv\" (UniqueName: 
\"kubernetes.io/projected/79b7af13-1acf-421b-914f-2f8fd797cbe3-kube-api-access-snblv\") pod \"nova-cell1-cell-mapping-b6chm\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.726908 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-config-data\") pod \"nova-cell1-cell-mapping-b6chm\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.727721 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-scripts\") pod \"nova-cell1-cell-mapping-b6chm\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.741208 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-b6chm\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.744211 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snblv\" (UniqueName: \"kubernetes.io/projected/79b7af13-1acf-421b-914f-2f8fd797cbe3-kube-api-access-snblv\") pod \"nova-cell1-cell-mapping-b6chm\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:34 crc kubenswrapper[4814]: I0122 05:39:34.864766 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.145245 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.193670 4814 generic.go:334] "Generic (PLEG): container finished" podID="02698b26-7a8b-45de-a007-c7b1723365a9" containerID="89341074f93cd3943af2dfb70fb07dca71595708a898baacc724470574feb977" exitCode=0 Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.193763 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.193800 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02698b26-7a8b-45de-a007-c7b1723365a9","Type":"ContainerDied","Data":"89341074f93cd3943af2dfb70fb07dca71595708a898baacc724470574feb977"} Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.193859 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"02698b26-7a8b-45de-a007-c7b1723365a9","Type":"ContainerDied","Data":"c859826d5e6bed2ef3b2a7a7a8b56b3d336ba1701ac291a69b62e37974f94cd3"} Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.193876 4814 scope.go:117] "RemoveContainer" containerID="c949826cdc208911a83ff3f427db751faeeae6f9b8fd119312a90e2a6cb290d4" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.215754 4814 scope.go:117] "RemoveContainer" containerID="14fb4ccff34a0eb4348ee078c975f96a877ba897e8023c58fea24fcd1b6a82d8" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.230668 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-sg-core-conf-yaml\") pod \"02698b26-7a8b-45de-a007-c7b1723365a9\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.230755 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-scripts\") pod \"02698b26-7a8b-45de-a007-c7b1723365a9\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.230853 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-config-data\") pod \"02698b26-7a8b-45de-a007-c7b1723365a9\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.230878 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02698b26-7a8b-45de-a007-c7b1723365a9-run-httpd\") pod \"02698b26-7a8b-45de-a007-c7b1723365a9\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.230895 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-combined-ca-bundle\") pod \"02698b26-7a8b-45de-a007-c7b1723365a9\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.230930 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02698b26-7a8b-45de-a007-c7b1723365a9-log-httpd\") pod \"02698b26-7a8b-45de-a007-c7b1723365a9\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.230957 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-ceilometer-tls-certs\") pod \"02698b26-7a8b-45de-a007-c7b1723365a9\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.231037 4814 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-pt88f\" (UniqueName: \"kubernetes.io/projected/02698b26-7a8b-45de-a007-c7b1723365a9-kube-api-access-pt88f\") pod \"02698b26-7a8b-45de-a007-c7b1723365a9\" (UID: \"02698b26-7a8b-45de-a007-c7b1723365a9\") " Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.231522 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02698b26-7a8b-45de-a007-c7b1723365a9-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "02698b26-7a8b-45de-a007-c7b1723365a9" (UID: "02698b26-7a8b-45de-a007-c7b1723365a9"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.234563 4814 scope.go:117] "RemoveContainer" containerID="7afeab68b9846fe7770ed1f2f0eb17fb6df47c3a70dd14f366491da0ffec8a6b" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.234963 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-scripts" (OuterVolumeSpecName: "scripts") pod "02698b26-7a8b-45de-a007-c7b1723365a9" (UID: "02698b26-7a8b-45de-a007-c7b1723365a9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.235073 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02698b26-7a8b-45de-a007-c7b1723365a9-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "02698b26-7a8b-45de-a007-c7b1723365a9" (UID: "02698b26-7a8b-45de-a007-c7b1723365a9"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.235807 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02698b26-7a8b-45de-a007-c7b1723365a9-kube-api-access-pt88f" (OuterVolumeSpecName: "kube-api-access-pt88f") pod "02698b26-7a8b-45de-a007-c7b1723365a9" (UID: "02698b26-7a8b-45de-a007-c7b1723365a9"). InnerVolumeSpecName "kube-api-access-pt88f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.258842 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "02698b26-7a8b-45de-a007-c7b1723365a9" (UID: "02698b26-7a8b-45de-a007-c7b1723365a9"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.270153 4814 scope.go:117] "RemoveContainer" containerID="89341074f93cd3943af2dfb70fb07dca71595708a898baacc724470574feb977" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.284598 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "02698b26-7a8b-45de-a007-c7b1723365a9" (UID: "02698b26-7a8b-45de-a007-c7b1723365a9"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.290540 4814 scope.go:117] "RemoveContainer" containerID="c949826cdc208911a83ff3f427db751faeeae6f9b8fd119312a90e2a6cb290d4" Jan 22 05:39:35 crc kubenswrapper[4814]: E0122 05:39:35.293713 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c949826cdc208911a83ff3f427db751faeeae6f9b8fd119312a90e2a6cb290d4\": container with ID starting with c949826cdc208911a83ff3f427db751faeeae6f9b8fd119312a90e2a6cb290d4 not found: ID does not exist" containerID="c949826cdc208911a83ff3f427db751faeeae6f9b8fd119312a90e2a6cb290d4" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.293744 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c949826cdc208911a83ff3f427db751faeeae6f9b8fd119312a90e2a6cb290d4"} err="failed to get container status \"c949826cdc208911a83ff3f427db751faeeae6f9b8fd119312a90e2a6cb290d4\": rpc error: code = NotFound desc = could not find container \"c949826cdc208911a83ff3f427db751faeeae6f9b8fd119312a90e2a6cb290d4\": container with ID starting with c949826cdc208911a83ff3f427db751faeeae6f9b8fd119312a90e2a6cb290d4 not found: ID does not exist" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.293763 4814 scope.go:117] "RemoveContainer" containerID="14fb4ccff34a0eb4348ee078c975f96a877ba897e8023c58fea24fcd1b6a82d8" Jan 22 05:39:35 crc kubenswrapper[4814]: E0122 05:39:35.294174 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14fb4ccff34a0eb4348ee078c975f96a877ba897e8023c58fea24fcd1b6a82d8\": container with ID starting with 14fb4ccff34a0eb4348ee078c975f96a877ba897e8023c58fea24fcd1b6a82d8 not found: ID does not exist" containerID="14fb4ccff34a0eb4348ee078c975f96a877ba897e8023c58fea24fcd1b6a82d8" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.294206 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14fb4ccff34a0eb4348ee078c975f96a877ba897e8023c58fea24fcd1b6a82d8"} err="failed to get container status \"14fb4ccff34a0eb4348ee078c975f96a877ba897e8023c58fea24fcd1b6a82d8\": rpc error: code = NotFound desc = could not find container \"14fb4ccff34a0eb4348ee078c975f96a877ba897e8023c58fea24fcd1b6a82d8\": container with ID starting with 14fb4ccff34a0eb4348ee078c975f96a877ba897e8023c58fea24fcd1b6a82d8 not found: ID does not exist" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.294231 4814 scope.go:117] "RemoveContainer" containerID="7afeab68b9846fe7770ed1f2f0eb17fb6df47c3a70dd14f366491da0ffec8a6b" Jan 22 05:39:35 crc kubenswrapper[4814]: E0122 05:39:35.294572 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7afeab68b9846fe7770ed1f2f0eb17fb6df47c3a70dd14f366491da0ffec8a6b\": container with ID starting with 7afeab68b9846fe7770ed1f2f0eb17fb6df47c3a70dd14f366491da0ffec8a6b not found: ID does not exist" containerID="7afeab68b9846fe7770ed1f2f0eb17fb6df47c3a70dd14f366491da0ffec8a6b" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.294687 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7afeab68b9846fe7770ed1f2f0eb17fb6df47c3a70dd14f366491da0ffec8a6b"} err="failed to get container status \"7afeab68b9846fe7770ed1f2f0eb17fb6df47c3a70dd14f366491da0ffec8a6b\": rpc error: code = NotFound desc = could not 
find container \"7afeab68b9846fe7770ed1f2f0eb17fb6df47c3a70dd14f366491da0ffec8a6b\": container with ID starting with 7afeab68b9846fe7770ed1f2f0eb17fb6df47c3a70dd14f366491da0ffec8a6b not found: ID does not exist" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.294702 4814 scope.go:117] "RemoveContainer" containerID="89341074f93cd3943af2dfb70fb07dca71595708a898baacc724470574feb977" Jan 22 05:39:35 crc kubenswrapper[4814]: E0122 05:39:35.295005 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89341074f93cd3943af2dfb70fb07dca71595708a898baacc724470574feb977\": container with ID starting with 89341074f93cd3943af2dfb70fb07dca71595708a898baacc724470574feb977 not found: ID does not exist" containerID="89341074f93cd3943af2dfb70fb07dca71595708a898baacc724470574feb977" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.295106 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89341074f93cd3943af2dfb70fb07dca71595708a898baacc724470574feb977"} err="failed to get container status \"89341074f93cd3943af2dfb70fb07dca71595708a898baacc724470574feb977\": rpc error: code = NotFound desc = could not find container \"89341074f93cd3943af2dfb70fb07dca71595708a898baacc724470574feb977\": container with ID starting with 89341074f93cd3943af2dfb70fb07dca71595708a898baacc724470574feb977 not found: ID does not exist" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.324103 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "02698b26-7a8b-45de-a007-c7b1723365a9" (UID: "02698b26-7a8b-45de-a007-c7b1723365a9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.333356 4814 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02698b26-7a8b-45de-a007-c7b1723365a9-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.333379 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.333389 4814 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/02698b26-7a8b-45de-a007-c7b1723365a9-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.333397 4814 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.333405 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pt88f\" (UniqueName: \"kubernetes.io/projected/02698b26-7a8b-45de-a007-c7b1723365a9-kube-api-access-pt88f\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.333414 4814 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.333421 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.366333 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-config-data" (OuterVolumeSpecName: "config-data") pod "02698b26-7a8b-45de-a007-c7b1723365a9" (UID: "02698b26-7a8b-45de-a007-c7b1723365a9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.434710 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02698b26-7a8b-45de-a007-c7b1723365a9-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.453417 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-b6chm"] Jan 22 05:39:35 crc kubenswrapper[4814]: W0122 05:39:35.457977 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod79b7af13_1acf_421b_914f_2f8fd797cbe3.slice/crio-40fe13b64f99bd46f5ade4b2eaacfde5912b499801c10441032399041d449814 WatchSource:0}: Error finding container 40fe13b64f99bd46f5ade4b2eaacfde5912b499801c10441032399041d449814: Status 404 returned error can't find the container with id 40fe13b64f99bd46f5ade4b2eaacfde5912b499801c10441032399041d449814 Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.554034 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.567245 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.581902 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:39:35 crc kubenswrapper[4814]: E0122 05:39:35.582427 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="ceilometer-central-agent" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.582447 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="ceilometer-central-agent" Jan 22 05:39:35 crc kubenswrapper[4814]: E0122 05:39:35.582470 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="sg-core" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.582479 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="sg-core" Jan 22 05:39:35 crc kubenswrapper[4814]: E0122 05:39:35.582494 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="ceilometer-notification-agent" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.582503 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="ceilometer-notification-agent" Jan 22 05:39:35 crc kubenswrapper[4814]: E0122 05:39:35.582520 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="proxy-httpd" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.582529 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="proxy-httpd" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.582811 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="ceilometer-central-agent" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.582828 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="ceilometer-notification-agent" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 
05:39:35.582866 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="proxy-httpd" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.582883 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" containerName="sg-core" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.588984 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.589095 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.590799 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.592078 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.592205 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.649209 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.649485 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ccrv\" (UniqueName: \"kubernetes.io/projected/1d888f29-0d7f-4adf-b734-52390c1256e9-kube-api-access-7ccrv\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.649715 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-config-data\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.649900 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-scripts\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.650099 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d888f29-0d7f-4adf-b734-52390c1256e9-log-httpd\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.650269 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.650501 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.650853 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d888f29-0d7f-4adf-b734-52390c1256e9-run-httpd\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.678149 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.740859 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-8867v"] Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.741061 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" podUID="a156fc49-5ab5-47b4-833d-a92705928a35" containerName="dnsmasq-dns" containerID="cri-o://7c4fdf339839ded0807c29e105234db308fc053c4c275b341026b454d5f96478" gracePeriod=10 Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.753083 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.753147 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d888f29-0d7f-4adf-b734-52390c1256e9-run-httpd\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.753250 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.753286 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ccrv\" (UniqueName: \"kubernetes.io/projected/1d888f29-0d7f-4adf-b734-52390c1256e9-kube-api-access-7ccrv\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.753313 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-config-data\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.753329 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-scripts\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.753400 4814 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d888f29-0d7f-4adf-b734-52390c1256e9-log-httpd\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.753455 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.755194 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d888f29-0d7f-4adf-b734-52390c1256e9-run-httpd\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.757173 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1d888f29-0d7f-4adf-b734-52390c1256e9-log-httpd\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.763067 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.764391 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.765021 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.766544 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-config-data\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.772433 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d888f29-0d7f-4adf-b734-52390c1256e9-scripts\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.783297 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ccrv\" (UniqueName: \"kubernetes.io/projected/1d888f29-0d7f-4adf-b734-52390c1256e9-kube-api-access-7ccrv\") pod \"ceilometer-0\" (UID: \"1d888f29-0d7f-4adf-b734-52390c1256e9\") " pod="openstack/ceilometer-0" Jan 22 05:39:35 crc kubenswrapper[4814]: I0122 05:39:35.916606 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.261363 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-b6chm" event={"ID":"79b7af13-1acf-421b-914f-2f8fd797cbe3","Type":"ContainerStarted","Data":"27a8b861a7885225d7763ea24046729800b4d5fd0d87211e36507fc8c55a71d9"} Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.263911 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-b6chm" event={"ID":"79b7af13-1acf-421b-914f-2f8fd797cbe3","Type":"ContainerStarted","Data":"40fe13b64f99bd46f5ade4b2eaacfde5912b499801c10441032399041d449814"} Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.264381 4814 generic.go:334] "Generic (PLEG): container finished" podID="a156fc49-5ab5-47b4-833d-a92705928a35" containerID="7c4fdf339839ded0807c29e105234db308fc053c4c275b341026b454d5f96478" exitCode=0 Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.264405 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" event={"ID":"a156fc49-5ab5-47b4-833d-a92705928a35","Type":"ContainerDied","Data":"7c4fdf339839ded0807c29e105234db308fc053c4c275b341026b454d5f96478"} Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.306352 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-b6chm" podStartSLOduration=2.306331366 podStartE2EDuration="2.306331366s" podCreationTimestamp="2026-01-22 05:39:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:39:36.287907128 +0000 UTC m=+1262.371395343" watchObservedRunningTime="2026-01-22 05:39:36.306331366 +0000 UTC m=+1262.389819581" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.373901 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02698b26-7a8b-45de-a007-c7b1723365a9" path="/var/lib/kubelet/pods/02698b26-7a8b-45de-a007-c7b1723365a9/volumes" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.427975 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.470351 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gg74r\" (UniqueName: \"kubernetes.io/projected/a156fc49-5ab5-47b4-833d-a92705928a35-kube-api-access-gg74r\") pod \"a156fc49-5ab5-47b4-833d-a92705928a35\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.470566 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-dns-svc\") pod \"a156fc49-5ab5-47b4-833d-a92705928a35\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.470661 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-config\") pod \"a156fc49-5ab5-47b4-833d-a92705928a35\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.470711 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-ovsdbserver-sb\") pod \"a156fc49-5ab5-47b4-833d-a92705928a35\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.470759 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-dns-swift-storage-0\") pod \"a156fc49-5ab5-47b4-833d-a92705928a35\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.470775 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-ovsdbserver-nb\") pod \"a156fc49-5ab5-47b4-833d-a92705928a35\" (UID: \"a156fc49-5ab5-47b4-833d-a92705928a35\") " Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.481724 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a156fc49-5ab5-47b4-833d-a92705928a35-kube-api-access-gg74r" (OuterVolumeSpecName: "kube-api-access-gg74r") pod "a156fc49-5ab5-47b4-833d-a92705928a35" (UID: "a156fc49-5ab5-47b4-833d-a92705928a35"). InnerVolumeSpecName "kube-api-access-gg74r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.558908 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-config" (OuterVolumeSpecName: "config") pod "a156fc49-5ab5-47b4-833d-a92705928a35" (UID: "a156fc49-5ab5-47b4-833d-a92705928a35"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.565603 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a156fc49-5ab5-47b4-833d-a92705928a35" (UID: "a156fc49-5ab5-47b4-833d-a92705928a35"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.573700 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a156fc49-5ab5-47b4-833d-a92705928a35" (UID: "a156fc49-5ab5-47b4-833d-a92705928a35"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.573948 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.573974 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.573989 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.574001 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gg74r\" (UniqueName: \"kubernetes.io/projected/a156fc49-5ab5-47b4-833d-a92705928a35-kube-api-access-gg74r\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.575879 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a156fc49-5ab5-47b4-833d-a92705928a35" (UID: "a156fc49-5ab5-47b4-833d-a92705928a35"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.602732 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a156fc49-5ab5-47b4-833d-a92705928a35" (UID: "a156fc49-5ab5-47b4-833d-a92705928a35"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.675472 4814 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:36.675513 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a156fc49-5ab5-47b4-833d-a92705928a35-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:37.277081 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:37.277665 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" event={"ID":"a156fc49-5ab5-47b4-833d-a92705928a35","Type":"ContainerDied","Data":"f6ffd6c1450a996ef382aa841f08dc2dfb61757ba5d0a301f0400b1148309aa9"} Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:37.277725 4814 scope.go:117] "RemoveContainer" containerID="7c4fdf339839ded0807c29e105234db308fc053c4c275b341026b454d5f96478" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:37.303707 4814 scope.go:117] "RemoveContainer" containerID="d0f9672ac5ac2db380dc9aac16c98076b696fb50073cec5e63bb229096c24724" Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:37.322877 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-8867v"] Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:37.336048 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-8867v"] Jan 22 05:39:37 crc kubenswrapper[4814]: I0122 05:39:37.454693 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 22 05:39:38 crc kubenswrapper[4814]: I0122 05:39:38.286576 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d888f29-0d7f-4adf-b734-52390c1256e9","Type":"ContainerStarted","Data":"c1213b066ce639f95de18951fafa1d45c08013960bf688cb3a31dfff03e1d467"} Jan 22 05:39:38 crc kubenswrapper[4814]: I0122 05:39:38.287579 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d888f29-0d7f-4adf-b734-52390c1256e9","Type":"ContainerStarted","Data":"5abce3e35c150e605073006e57f72671951c64fd00a7bab1bd40551be574d2ee"} Jan 22 05:39:38 crc kubenswrapper[4814]: I0122 05:39:38.353369 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a156fc49-5ab5-47b4-833d-a92705928a35" path="/var/lib/kubelet/pods/a156fc49-5ab5-47b4-833d-a92705928a35/volumes" Jan 22 05:39:39 crc kubenswrapper[4814]: I0122 05:39:39.295855 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d888f29-0d7f-4adf-b734-52390c1256e9","Type":"ContainerStarted","Data":"ab1498ea59362c3ec92ce21961b8a3d24fccf0cfa491e628bc676acb3bcdb75b"} Jan 22 05:39:40 crc kubenswrapper[4814]: I0122 05:39:40.305477 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d888f29-0d7f-4adf-b734-52390c1256e9","Type":"ContainerStarted","Data":"56dc7ac7751f00a5fe3cd891b174dd052c6c5380411ddc8633ae7b748df6989d"} Jan 22 05:39:41 crc kubenswrapper[4814]: I0122 05:39:41.144943 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5fbc4d444f-8867v" podUID="a156fc49-5ab5-47b4-833d-a92705928a35" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.204:5353: i/o timeout" Jan 22 05:39:41 crc kubenswrapper[4814]: I0122 05:39:41.317393 4814 generic.go:334] "Generic (PLEG): container finished" podID="79b7af13-1acf-421b-914f-2f8fd797cbe3" containerID="27a8b861a7885225d7763ea24046729800b4d5fd0d87211e36507fc8c55a71d9" exitCode=0 Jan 22 05:39:41 crc kubenswrapper[4814]: I0122 05:39:41.317462 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-b6chm" event={"ID":"79b7af13-1acf-421b-914f-2f8fd797cbe3","Type":"ContainerDied","Data":"27a8b861a7885225d7763ea24046729800b4d5fd0d87211e36507fc8c55a71d9"} Jan 22 
05:39:41 crc kubenswrapper[4814]: I0122 05:39:41.320087 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1d888f29-0d7f-4adf-b734-52390c1256e9","Type":"ContainerStarted","Data":"177c15f5524204ee567a6036b026bdffe5d5d9c65bae513287591b2eb5ec79f7"} Jan 22 05:39:41 crc kubenswrapper[4814]: I0122 05:39:41.320273 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 22 05:39:41 crc kubenswrapper[4814]: I0122 05:39:41.363644 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.375233872 podStartE2EDuration="6.363615455s" podCreationTimestamp="2026-01-22 05:39:35 +0000 UTC" firstStartedPulling="2026-01-22 05:39:37.465407039 +0000 UTC m=+1263.548895254" lastFinishedPulling="2026-01-22 05:39:40.453788602 +0000 UTC m=+1266.537276837" observedRunningTime="2026-01-22 05:39:41.354398079 +0000 UTC m=+1267.437886294" watchObservedRunningTime="2026-01-22 05:39:41.363615455 +0000 UTC m=+1267.447103670" Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.515207 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.515467 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.751120 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.799292 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snblv\" (UniqueName: \"kubernetes.io/projected/79b7af13-1acf-421b-914f-2f8fd797cbe3-kube-api-access-snblv\") pod \"79b7af13-1acf-421b-914f-2f8fd797cbe3\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.799332 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-scripts\") pod \"79b7af13-1acf-421b-914f-2f8fd797cbe3\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.799450 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-config-data\") pod \"79b7af13-1acf-421b-914f-2f8fd797cbe3\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.799518 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-combined-ca-bundle\") pod \"79b7af13-1acf-421b-914f-2f8fd797cbe3\" (UID: \"79b7af13-1acf-421b-914f-2f8fd797cbe3\") " Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.818775 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-scripts" (OuterVolumeSpecName: "scripts") pod "79b7af13-1acf-421b-914f-2f8fd797cbe3" (UID: "79b7af13-1acf-421b-914f-2f8fd797cbe3"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.823695 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79b7af13-1acf-421b-914f-2f8fd797cbe3-kube-api-access-snblv" (OuterVolumeSpecName: "kube-api-access-snblv") pod "79b7af13-1acf-421b-914f-2f8fd797cbe3" (UID: "79b7af13-1acf-421b-914f-2f8fd797cbe3"). InnerVolumeSpecName "kube-api-access-snblv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.854808 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "79b7af13-1acf-421b-914f-2f8fd797cbe3" (UID: "79b7af13-1acf-421b-914f-2f8fd797cbe3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.855063 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-config-data" (OuterVolumeSpecName: "config-data") pod "79b7af13-1acf-421b-914f-2f8fd797cbe3" (UID: "79b7af13-1acf-421b-914f-2f8fd797cbe3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.912022 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snblv\" (UniqueName: \"kubernetes.io/projected/79b7af13-1acf-421b-914f-2f8fd797cbe3-kube-api-access-snblv\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.912060 4814 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-scripts\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.912072 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:42 crc kubenswrapper[4814]: I0122 05:39:42.912081 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79b7af13-1acf-421b-914f-2f8fd797cbe3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:43 crc kubenswrapper[4814]: I0122 05:39:43.337267 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-b6chm" event={"ID":"79b7af13-1acf-421b-914f-2f8fd797cbe3","Type":"ContainerDied","Data":"40fe13b64f99bd46f5ade4b2eaacfde5912b499801c10441032399041d449814"} Jan 22 05:39:43 crc kubenswrapper[4814]: I0122 05:39:43.337306 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40fe13b64f99bd46f5ade4b2eaacfde5912b499801c10441032399041d449814" Jan 22 05:39:43 crc kubenswrapper[4814]: I0122 05:39:43.337309 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-b6chm" Jan 22 05:39:43 crc kubenswrapper[4814]: I0122 05:39:43.515073 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:39:43 crc kubenswrapper[4814]: I0122 05:39:43.515297 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="725f36e1-6a7d-452b-bb96-0d8f18841546" containerName="nova-api-log" containerID="cri-o://a317f0fc6065690b1500e088e81a5683312fea0a30967ff2a4143434f1ac7723" gracePeriod=30 Jan 22 05:39:43 crc kubenswrapper[4814]: I0122 05:39:43.515364 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="725f36e1-6a7d-452b-bb96-0d8f18841546" containerName="nova-api-api" containerID="cri-o://26dbc5f9cc31a682231fc1cd1937469cbf05a324720be13945a4eb88ea09f0fa" gracePeriod=30 Jan 22 05:39:43 crc kubenswrapper[4814]: I0122 05:39:43.523962 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:39:43 crc kubenswrapper[4814]: I0122 05:39:43.524130 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc" containerName="nova-scheduler-scheduler" containerID="cri-o://0d4b6e824297d0b97d8c87e870b6de15fcc1fb274ae68a8b5e2c4bcb2ab518ca" gracePeriod=30 Jan 22 05:39:43 crc kubenswrapper[4814]: I0122 05:39:43.525267 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="725f36e1-6a7d-452b-bb96-0d8f18841546" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.216:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 05:39:43 crc kubenswrapper[4814]: I0122 05:39:43.525352 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="725f36e1-6a7d-452b-bb96-0d8f18841546" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.216:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 05:39:43 crc kubenswrapper[4814]: I0122 05:39:43.617179 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:39:43 crc kubenswrapper[4814]: I0122 05:39:43.617520 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerName="nova-metadata-metadata" containerID="cri-o://bea11130c5b37c506075512ada453120f3780a1ff4989612aa80a1f856bfbdd4" gracePeriod=30 Jan 22 05:39:43 crc kubenswrapper[4814]: I0122 05:39:43.617763 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerName="nova-metadata-log" containerID="cri-o://79ae677039dc6a7a18f20054ed42a355632df462115a245c6529bc647867c09b" gracePeriod=30 Jan 22 05:39:44 crc kubenswrapper[4814]: I0122 05:39:44.346390 4814 generic.go:334] "Generic (PLEG): container finished" podID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerID="79ae677039dc6a7a18f20054ed42a355632df462115a245c6529bc647867c09b" exitCode=143 Jan 22 05:39:44 crc kubenswrapper[4814]: I0122 05:39:44.348246 4814 generic.go:334] "Generic (PLEG): container finished" podID="725f36e1-6a7d-452b-bb96-0d8f18841546" containerID="a317f0fc6065690b1500e088e81a5683312fea0a30967ff2a4143434f1ac7723" exitCode=143 Jan 22 05:39:44 crc kubenswrapper[4814]: 
I0122 05:39:44.353676 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d63a66f3-c438-4af3-8f8f-41278409d5a0","Type":"ContainerDied","Data":"79ae677039dc6a7a18f20054ed42a355632df462115a245c6529bc647867c09b"} Jan 22 05:39:44 crc kubenswrapper[4814]: I0122 05:39:44.353705 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"725f36e1-6a7d-452b-bb96-0d8f18841546","Type":"ContainerDied","Data":"a317f0fc6065690b1500e088e81a5683312fea0a30967ff2a4143434f1ac7723"} Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.295822 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.380067 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.380373 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc","Type":"ContainerDied","Data":"0d4b6e824297d0b97d8c87e870b6de15fcc1fb274ae68a8b5e2c4bcb2ab518ca"} Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.380452 4814 scope.go:117] "RemoveContainer" containerID="0d4b6e824297d0b97d8c87e870b6de15fcc1fb274ae68a8b5e2c4bcb2ab518ca" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.376610 4814 generic.go:334] "Generic (PLEG): container finished" podID="ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc" containerID="0d4b6e824297d0b97d8c87e870b6de15fcc1fb274ae68a8b5e2c4bcb2ab518ca" exitCode=0 Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.381533 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc","Type":"ContainerDied","Data":"0ad48b24efe8386fb6eae88e68ea46cd1b9cd7370b389abf52abc3beb2fc4efe"} Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.403798 4814 scope.go:117] "RemoveContainer" containerID="0d4b6e824297d0b97d8c87e870b6de15fcc1fb274ae68a8b5e2c4bcb2ab518ca" Jan 22 05:39:45 crc kubenswrapper[4814]: E0122 05:39:45.404239 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d4b6e824297d0b97d8c87e870b6de15fcc1fb274ae68a8b5e2c4bcb2ab518ca\": container with ID starting with 0d4b6e824297d0b97d8c87e870b6de15fcc1fb274ae68a8b5e2c4bcb2ab518ca not found: ID does not exist" containerID="0d4b6e824297d0b97d8c87e870b6de15fcc1fb274ae68a8b5e2c4bcb2ab518ca" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.404279 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d4b6e824297d0b97d8c87e870b6de15fcc1fb274ae68a8b5e2c4bcb2ab518ca"} err="failed to get container status \"0d4b6e824297d0b97d8c87e870b6de15fcc1fb274ae68a8b5e2c4bcb2ab518ca\": rpc error: code = NotFound desc = could not find container \"0d4b6e824297d0b97d8c87e870b6de15fcc1fb274ae68a8b5e2c4bcb2ab518ca\": container with ID starting with 0d4b6e824297d0b97d8c87e870b6de15fcc1fb274ae68a8b5e2c4bcb2ab518ca not found: ID does not exist" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.460269 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-combined-ca-bundle\") pod \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\" (UID: \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\") " Jan 22 05:39:45 
crc kubenswrapper[4814]: I0122 05:39:45.460351 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mw85x\" (UniqueName: \"kubernetes.io/projected/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-kube-api-access-mw85x\") pod \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\" (UID: \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\") " Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.460438 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-config-data\") pod \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\" (UID: \"ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc\") " Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.468841 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-kube-api-access-mw85x" (OuterVolumeSpecName: "kube-api-access-mw85x") pod "ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc" (UID: "ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc"). InnerVolumeSpecName "kube-api-access-mw85x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.491565 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc" (UID: "ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.497812 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-config-data" (OuterVolumeSpecName: "config-data") pod "ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc" (UID: "ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.562543 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mw85x\" (UniqueName: \"kubernetes.io/projected/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-kube-api-access-mw85x\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.562574 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.562584 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.715882 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.727173 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.759549 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:39:45 crc kubenswrapper[4814]: E0122 05:39:45.760085 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc" containerName="nova-scheduler-scheduler" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.760102 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc" containerName="nova-scheduler-scheduler" Jan 22 05:39:45 crc kubenswrapper[4814]: E0122 05:39:45.760132 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79b7af13-1acf-421b-914f-2f8fd797cbe3" containerName="nova-manage" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.760139 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="79b7af13-1acf-421b-914f-2f8fd797cbe3" containerName="nova-manage" Jan 22 05:39:45 crc kubenswrapper[4814]: E0122 05:39:45.760155 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a156fc49-5ab5-47b4-833d-a92705928a35" containerName="dnsmasq-dns" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.760161 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a156fc49-5ab5-47b4-833d-a92705928a35" containerName="dnsmasq-dns" Jan 22 05:39:45 crc kubenswrapper[4814]: E0122 05:39:45.760174 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a156fc49-5ab5-47b4-833d-a92705928a35" containerName="init" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.760180 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a156fc49-5ab5-47b4-833d-a92705928a35" containerName="init" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.760352 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a156fc49-5ab5-47b4-833d-a92705928a35" containerName="dnsmasq-dns" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.760376 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc" containerName="nova-scheduler-scheduler" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.760385 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="79b7af13-1acf-421b-914f-2f8fd797cbe3" containerName="nova-manage" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 
05:39:45.761044 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.764763 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.766017 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84da62b6-227f-40be-b4e3-967cd2b66c65-config-data\") pod \"nova-scheduler-0\" (UID: \"84da62b6-227f-40be-b4e3-967cd2b66c65\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.766141 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84da62b6-227f-40be-b4e3-967cd2b66c65-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"84da62b6-227f-40be-b4e3-967cd2b66c65\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.766260 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bphwl\" (UniqueName: \"kubernetes.io/projected/84da62b6-227f-40be-b4e3-967cd2b66c65-kube-api-access-bphwl\") pod \"nova-scheduler-0\" (UID: \"84da62b6-227f-40be-b4e3-967cd2b66c65\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.797852 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.868435 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bphwl\" (UniqueName: \"kubernetes.io/projected/84da62b6-227f-40be-b4e3-967cd2b66c65-kube-api-access-bphwl\") pod \"nova-scheduler-0\" (UID: \"84da62b6-227f-40be-b4e3-967cd2b66c65\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.868588 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84da62b6-227f-40be-b4e3-967cd2b66c65-config-data\") pod \"nova-scheduler-0\" (UID: \"84da62b6-227f-40be-b4e3-967cd2b66c65\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.868647 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84da62b6-227f-40be-b4e3-967cd2b66c65-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"84da62b6-227f-40be-b4e3-967cd2b66c65\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.872981 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84da62b6-227f-40be-b4e3-967cd2b66c65-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"84da62b6-227f-40be-b4e3-967cd2b66c65\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.873269 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84da62b6-227f-40be-b4e3-967cd2b66c65-config-data\") pod \"nova-scheduler-0\" (UID: \"84da62b6-227f-40be-b4e3-967cd2b66c65\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:45 crc kubenswrapper[4814]: I0122 05:39:45.886494 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-bphwl\" (UniqueName: \"kubernetes.io/projected/84da62b6-227f-40be-b4e3-967cd2b66c65-kube-api-access-bphwl\") pod \"nova-scheduler-0\" (UID: \"84da62b6-227f-40be-b4e3-967cd2b66c65\") " pod="openstack/nova-scheduler-0" Jan 22 05:39:46 crc kubenswrapper[4814]: I0122 05:39:46.093550 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 22 05:39:46 crc kubenswrapper[4814]: I0122 05:39:46.359766 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc" path="/var/lib/kubelet/pods/ec0cbc1c-fcdb-4dfa-95a2-75b3bfb30adc/volumes" Jan 22 05:39:46 crc kubenswrapper[4814]: W0122 05:39:46.538475 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod84da62b6_227f_40be_b4e3_967cd2b66c65.slice/crio-97c03ebba9b5bad90fb4ea1291eebaee11d3d03f17a7b7d7253c6ce1a2f4acde WatchSource:0}: Error finding container 97c03ebba9b5bad90fb4ea1291eebaee11d3d03f17a7b7d7253c6ce1a2f4acde: Status 404 returned error can't find the container with id 97c03ebba9b5bad90fb4ea1291eebaee11d3d03f17a7b7d7253c6ce1a2f4acde Jan 22 05:39:46 crc kubenswrapper[4814]: I0122 05:39:46.546463 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 22 05:39:46 crc kubenswrapper[4814]: I0122 05:39:46.781227 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.208:8775/\": read tcp 10.217.0.2:49798->10.217.0.208:8775: read: connection reset by peer" Jan 22 05:39:46 crc kubenswrapper[4814]: I0122 05:39:46.781294 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.208:8775/\": read tcp 10.217.0.2:49802->10.217.0.208:8775: read: connection reset by peer" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.180321 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.292327 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-nova-metadata-tls-certs\") pod \"d63a66f3-c438-4af3-8f8f-41278409d5a0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.292751 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nfrg\" (UniqueName: \"kubernetes.io/projected/d63a66f3-c438-4af3-8f8f-41278409d5a0-kube-api-access-7nfrg\") pod \"d63a66f3-c438-4af3-8f8f-41278409d5a0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.292788 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-combined-ca-bundle\") pod \"d63a66f3-c438-4af3-8f8f-41278409d5a0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.292894 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-config-data\") pod \"d63a66f3-c438-4af3-8f8f-41278409d5a0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.292920 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d63a66f3-c438-4af3-8f8f-41278409d5a0-logs\") pod \"d63a66f3-c438-4af3-8f8f-41278409d5a0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.293765 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d63a66f3-c438-4af3-8f8f-41278409d5a0-logs" (OuterVolumeSpecName: "logs") pod "d63a66f3-c438-4af3-8f8f-41278409d5a0" (UID: "d63a66f3-c438-4af3-8f8f-41278409d5a0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.313290 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d63a66f3-c438-4af3-8f8f-41278409d5a0-kube-api-access-7nfrg" (OuterVolumeSpecName: "kube-api-access-7nfrg") pod "d63a66f3-c438-4af3-8f8f-41278409d5a0" (UID: "d63a66f3-c438-4af3-8f8f-41278409d5a0"). InnerVolumeSpecName "kube-api-access-7nfrg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.381997 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-config-data" (OuterVolumeSpecName: "config-data") pod "d63a66f3-c438-4af3-8f8f-41278409d5a0" (UID: "d63a66f3-c438-4af3-8f8f-41278409d5a0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.401430 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "d63a66f3-c438-4af3-8f8f-41278409d5a0" (UID: "d63a66f3-c438-4af3-8f8f-41278409d5a0"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.406151 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-nova-metadata-tls-certs\") pod \"d63a66f3-c438-4af3-8f8f-41278409d5a0\" (UID: \"d63a66f3-c438-4af3-8f8f-41278409d5a0\") " Jan 22 05:39:47 crc kubenswrapper[4814]: W0122 05:39:47.407346 4814 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/d63a66f3-c438-4af3-8f8f-41278409d5a0/volumes/kubernetes.io~secret/nova-metadata-tls-certs Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.407409 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "d63a66f3-c438-4af3-8f8f-41278409d5a0" (UID: "d63a66f3-c438-4af3-8f8f-41278409d5a0"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.409005 4814 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.409027 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nfrg\" (UniqueName: \"kubernetes.io/projected/d63a66f3-c438-4af3-8f8f-41278409d5a0-kube-api-access-7nfrg\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.409041 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.409050 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d63a66f3-c438-4af3-8f8f-41278409d5a0-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.415313 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d63a66f3-c438-4af3-8f8f-41278409d5a0" (UID: "d63a66f3-c438-4af3-8f8f-41278409d5a0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.418204 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"84da62b6-227f-40be-b4e3-967cd2b66c65","Type":"ContainerStarted","Data":"c695ee6de60075bfdde0ef97c75c8856b176d1c5c401c2552eeac66f253d7746"} Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.418239 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"84da62b6-227f-40be-b4e3-967cd2b66c65","Type":"ContainerStarted","Data":"97c03ebba9b5bad90fb4ea1291eebaee11d3d03f17a7b7d7253c6ce1a2f4acde"} Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.421104 4814 generic.go:334] "Generic (PLEG): container finished" podID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerID="bea11130c5b37c506075512ada453120f3780a1ff4989612aa80a1f856bfbdd4" exitCode=0 Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.421150 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d63a66f3-c438-4af3-8f8f-41278409d5a0","Type":"ContainerDied","Data":"bea11130c5b37c506075512ada453120f3780a1ff4989612aa80a1f856bfbdd4"} Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.421178 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d63a66f3-c438-4af3-8f8f-41278409d5a0","Type":"ContainerDied","Data":"c8714d699716cfb677099fc739cc6ff700da45220209a57f915339da3b7469a5"} Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.421195 4814 scope.go:117] "RemoveContainer" containerID="bea11130c5b37c506075512ada453120f3780a1ff4989612aa80a1f856bfbdd4" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.421330 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.436743 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.436725002 podStartE2EDuration="2.436725002s" podCreationTimestamp="2026-01-22 05:39:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:39:47.433696117 +0000 UTC m=+1273.517184352" watchObservedRunningTime="2026-01-22 05:39:47.436725002 +0000 UTC m=+1273.520213217" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.481327 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.481846 4814 scope.go:117] "RemoveContainer" containerID="79ae677039dc6a7a18f20054ed42a355632df462115a245c6529bc647867c09b" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.498809 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.511666 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.511884 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d63a66f3-c438-4af3-8f8f-41278409d5a0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:47 crc kubenswrapper[4814]: E0122 05:39:47.512050 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerName="nova-metadata-log" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.512067 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerName="nova-metadata-log" Jan 22 05:39:47 crc kubenswrapper[4814]: E0122 05:39:47.512089 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerName="nova-metadata-metadata" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.512095 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerName="nova-metadata-metadata" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.512262 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerName="nova-metadata-log" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.512292 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="d63a66f3-c438-4af3-8f8f-41278409d5a0" containerName="nova-metadata-metadata" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.513196 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.515132 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.521448 4814 scope.go:117] "RemoveContainer" containerID="bea11130c5b37c506075512ada453120f3780a1ff4989612aa80a1f856bfbdd4" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.522001 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 22 05:39:47 crc kubenswrapper[4814]: E0122 05:39:47.522144 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bea11130c5b37c506075512ada453120f3780a1ff4989612aa80a1f856bfbdd4\": container with ID starting with bea11130c5b37c506075512ada453120f3780a1ff4989612aa80a1f856bfbdd4 not found: ID does not exist" containerID="bea11130c5b37c506075512ada453120f3780a1ff4989612aa80a1f856bfbdd4" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.522166 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bea11130c5b37c506075512ada453120f3780a1ff4989612aa80a1f856bfbdd4"} err="failed to get container status \"bea11130c5b37c506075512ada453120f3780a1ff4989612aa80a1f856bfbdd4\": rpc error: code = NotFound desc = could not find container \"bea11130c5b37c506075512ada453120f3780a1ff4989612aa80a1f856bfbdd4\": container with ID starting with bea11130c5b37c506075512ada453120f3780a1ff4989612aa80a1f856bfbdd4 not found: ID does not exist" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.522184 4814 scope.go:117] "RemoveContainer" containerID="79ae677039dc6a7a18f20054ed42a355632df462115a245c6529bc647867c09b" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.523954 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:39:47 crc kubenswrapper[4814]: E0122 05:39:47.524245 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79ae677039dc6a7a18f20054ed42a355632df462115a245c6529bc647867c09b\": container with ID starting with 79ae677039dc6a7a18f20054ed42a355632df462115a245c6529bc647867c09b not found: ID does not exist" containerID="79ae677039dc6a7a18f20054ed42a355632df462115a245c6529bc647867c09b" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.524269 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79ae677039dc6a7a18f20054ed42a355632df462115a245c6529bc647867c09b"} err="failed to get container status \"79ae677039dc6a7a18f20054ed42a355632df462115a245c6529bc647867c09b\": rpc error: code = NotFound desc = could not find container \"79ae677039dc6a7a18f20054ed42a355632df462115a245c6529bc647867c09b\": container with ID starting with 79ae677039dc6a7a18f20054ed42a355632df462115a245c6529bc647867c09b not found: ID does not exist" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.613674 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w297g\" (UniqueName: \"kubernetes.io/projected/0e05bab5-c9c8-452b-85be-683346f24176-kube-api-access-w297g\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.613749 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e05bab5-c9c8-452b-85be-683346f24176-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.613837 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e05bab5-c9c8-452b-85be-683346f24176-logs\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.613901 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e05bab5-c9c8-452b-85be-683346f24176-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.613930 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e05bab5-c9c8-452b-85be-683346f24176-config-data\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.714998 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e05bab5-c9c8-452b-85be-683346f24176-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.715045 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e05bab5-c9c8-452b-85be-683346f24176-config-data\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.715120 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w297g\" (UniqueName: \"kubernetes.io/projected/0e05bab5-c9c8-452b-85be-683346f24176-kube-api-access-w297g\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.715902 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e05bab5-c9c8-452b-85be-683346f24176-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.716213 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e05bab5-c9c8-452b-85be-683346f24176-logs\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.716542 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e05bab5-c9c8-452b-85be-683346f24176-logs\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " 
pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.719739 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e05bab5-c9c8-452b-85be-683346f24176-config-data\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.722291 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e05bab5-c9c8-452b-85be-683346f24176-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.722361 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e05bab5-c9c8-452b-85be-683346f24176-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.739177 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w297g\" (UniqueName: \"kubernetes.io/projected/0e05bab5-c9c8-452b-85be-683346f24176-kube-api-access-w297g\") pod \"nova-metadata-0\" (UID: \"0e05bab5-c9c8-452b-85be-683346f24176\") " pod="openstack/nova-metadata-0" Jan 22 05:39:47 crc kubenswrapper[4814]: I0122 05:39:47.842942 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 22 05:39:48 crc kubenswrapper[4814]: I0122 05:39:48.356367 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d63a66f3-c438-4af3-8f8f-41278409d5a0" path="/var/lib/kubelet/pods/d63a66f3-c438-4af3-8f8f-41278409d5a0/volumes" Jan 22 05:39:48 crc kubenswrapper[4814]: I0122 05:39:48.478720 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.443047 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0e05bab5-c9c8-452b-85be-683346f24176","Type":"ContainerStarted","Data":"320e2ccb799380f08b443126f9048d3ff6a34a16efcd332558591c45144e89f6"} Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.443402 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0e05bab5-c9c8-452b-85be-683346f24176","Type":"ContainerStarted","Data":"10e615a2e934b49d47afe3a7c7274fffbc99a4e2da535557a88f4c9dc77468c8"} Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.443414 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0e05bab5-c9c8-452b-85be-683346f24176","Type":"ContainerStarted","Data":"c1c55210eccd0468a34ea6a0f646fd695a341c47071be9847c0f63d11efa3ada"} Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.447136 4814 generic.go:334] "Generic (PLEG): container finished" podID="725f36e1-6a7d-452b-bb96-0d8f18841546" containerID="26dbc5f9cc31a682231fc1cd1937469cbf05a324720be13945a4eb88ea09f0fa" exitCode=0 Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.447193 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"725f36e1-6a7d-452b-bb96-0d8f18841546","Type":"ContainerDied","Data":"26dbc5f9cc31a682231fc1cd1937469cbf05a324720be13945a4eb88ea09f0fa"} Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 
05:39:49.447214 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"725f36e1-6a7d-452b-bb96-0d8f18841546","Type":"ContainerDied","Data":"0b2d48770741daa3c3d97b6e56074f8cf5adf26a6c76419d47d33319c3e86af9"} Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.447224 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b2d48770741daa3c3d97b6e56074f8cf5adf26a6c76419d47d33319c3e86af9" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.465955 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.465936361 podStartE2EDuration="2.465936361s" podCreationTimestamp="2026-01-22 05:39:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:39:49.461111221 +0000 UTC m=+1275.544599446" watchObservedRunningTime="2026-01-22 05:39:49.465936361 +0000 UTC m=+1275.549424576" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.472792 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.662373 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-config-data\") pod \"725f36e1-6a7d-452b-bb96-0d8f18841546\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.663805 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-combined-ca-bundle\") pod \"725f36e1-6a7d-452b-bb96-0d8f18841546\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.663975 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/725f36e1-6a7d-452b-bb96-0d8f18841546-logs\") pod \"725f36e1-6a7d-452b-bb96-0d8f18841546\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.664120 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-public-tls-certs\") pod \"725f36e1-6a7d-452b-bb96-0d8f18841546\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.664397 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-internal-tls-certs\") pod \"725f36e1-6a7d-452b-bb96-0d8f18841546\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.664436 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czgmm\" (UniqueName: \"kubernetes.io/projected/725f36e1-6a7d-452b-bb96-0d8f18841546-kube-api-access-czgmm\") pod \"725f36e1-6a7d-452b-bb96-0d8f18841546\" (UID: \"725f36e1-6a7d-452b-bb96-0d8f18841546\") " Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.664402 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/725f36e1-6a7d-452b-bb96-0d8f18841546-logs" (OuterVolumeSpecName: "logs") 
pod "725f36e1-6a7d-452b-bb96-0d8f18841546" (UID: "725f36e1-6a7d-452b-bb96-0d8f18841546"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.665286 4814 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/725f36e1-6a7d-452b-bb96-0d8f18841546-logs\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.670833 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/725f36e1-6a7d-452b-bb96-0d8f18841546-kube-api-access-czgmm" (OuterVolumeSpecName: "kube-api-access-czgmm") pod "725f36e1-6a7d-452b-bb96-0d8f18841546" (UID: "725f36e1-6a7d-452b-bb96-0d8f18841546"). InnerVolumeSpecName "kube-api-access-czgmm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.710220 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-config-data" (OuterVolumeSpecName: "config-data") pod "725f36e1-6a7d-452b-bb96-0d8f18841546" (UID: "725f36e1-6a7d-452b-bb96-0d8f18841546"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.717956 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "725f36e1-6a7d-452b-bb96-0d8f18841546" (UID: "725f36e1-6a7d-452b-bb96-0d8f18841546"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.762001 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "725f36e1-6a7d-452b-bb96-0d8f18841546" (UID: "725f36e1-6a7d-452b-bb96-0d8f18841546"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.766635 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "725f36e1-6a7d-452b-bb96-0d8f18841546" (UID: "725f36e1-6a7d-452b-bb96-0d8f18841546"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.767159 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.767198 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.767212 4814 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.767225 4814 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/725f36e1-6a7d-452b-bb96-0d8f18841546-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:49 crc kubenswrapper[4814]: I0122 05:39:49.767236 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czgmm\" (UniqueName: \"kubernetes.io/projected/725f36e1-6a7d-452b-bb96-0d8f18841546-kube-api-access-czgmm\") on node \"crc\" DevicePath \"\"" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.456551 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.487710 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.512215 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.532904 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 22 05:39:50 crc kubenswrapper[4814]: E0122 05:39:50.533409 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="725f36e1-6a7d-452b-bb96-0d8f18841546" containerName="nova-api-api" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.533431 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="725f36e1-6a7d-452b-bb96-0d8f18841546" containerName="nova-api-api" Jan 22 05:39:50 crc kubenswrapper[4814]: E0122 05:39:50.533462 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="725f36e1-6a7d-452b-bb96-0d8f18841546" containerName="nova-api-log" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.533491 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="725f36e1-6a7d-452b-bb96-0d8f18841546" containerName="nova-api-log" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.533806 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="725f36e1-6a7d-452b-bb96-0d8f18841546" containerName="nova-api-log" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.533824 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="725f36e1-6a7d-452b-bb96-0d8f18841546" containerName="nova-api-api" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.535002 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.537803 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.538122 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.538239 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.560146 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.693221 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsqmq\" (UniqueName: \"kubernetes.io/projected/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-kube-api-access-tsqmq\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.693271 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-public-tls-certs\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.693354 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-logs\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.693376 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.693401 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-config-data\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.693424 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.795999 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-logs\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.796040 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-internal-tls-certs\") pod \"nova-api-0\" (UID: 
\"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.796070 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-config-data\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.796095 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.796164 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsqmq\" (UniqueName: \"kubernetes.io/projected/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-kube-api-access-tsqmq\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.796186 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-public-tls-certs\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.796937 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-logs\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.812231 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-public-tls-certs\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.812382 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-config-data\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.820396 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.820836 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.825318 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsqmq\" (UniqueName: \"kubernetes.io/projected/e16e2647-5f62-4f76-b876-0ad5f8f35f1f-kube-api-access-tsqmq\") pod \"nova-api-0\" (UID: \"e16e2647-5f62-4f76-b876-0ad5f8f35f1f\") " 
pod="openstack/nova-api-0" Jan 22 05:39:50 crc kubenswrapper[4814]: I0122 05:39:50.856817 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 22 05:39:51 crc kubenswrapper[4814]: I0122 05:39:51.094930 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 22 05:39:51 crc kubenswrapper[4814]: I0122 05:39:51.338593 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 22 05:39:51 crc kubenswrapper[4814]: I0122 05:39:51.469061 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e16e2647-5f62-4f76-b876-0ad5f8f35f1f","Type":"ContainerStarted","Data":"b0ebaf40ac1dd8add7d141a8e6bbf5724ee7edacf7c58c6970f1aa9b3ac7514c"} Jan 22 05:39:52 crc kubenswrapper[4814]: I0122 05:39:52.355320 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="725f36e1-6a7d-452b-bb96-0d8f18841546" path="/var/lib/kubelet/pods/725f36e1-6a7d-452b-bb96-0d8f18841546/volumes" Jan 22 05:39:52 crc kubenswrapper[4814]: I0122 05:39:52.492891 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e16e2647-5f62-4f76-b876-0ad5f8f35f1f","Type":"ContainerStarted","Data":"ca75b393a23451cd85f7a4b305a0a00791de4ad48f0f08c36dcf4ac305916405"} Jan 22 05:39:52 crc kubenswrapper[4814]: I0122 05:39:52.492934 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e16e2647-5f62-4f76-b876-0ad5f8f35f1f","Type":"ContainerStarted","Data":"47beb28b770fbb4329ea46a95a0042b636091b21293a0013e55c9618a79819eb"} Jan 22 05:39:52 crc kubenswrapper[4814]: I0122 05:39:52.525444 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.5254224819999997 podStartE2EDuration="2.525422482s" podCreationTimestamp="2026-01-22 05:39:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:39:52.519729735 +0000 UTC m=+1278.603217980" watchObservedRunningTime="2026-01-22 05:39:52.525422482 +0000 UTC m=+1278.608910697" Jan 22 05:39:52 crc kubenswrapper[4814]: I0122 05:39:52.843900 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 05:39:52 crc kubenswrapper[4814]: I0122 05:39:52.843938 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 22 05:39:56 crc kubenswrapper[4814]: I0122 05:39:56.094587 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 22 05:39:56 crc kubenswrapper[4814]: I0122 05:39:56.121379 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 22 05:39:56 crc kubenswrapper[4814]: I0122 05:39:56.583871 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 22 05:39:57 crc kubenswrapper[4814]: I0122 05:39:57.843803 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 22 05:39:57 crc kubenswrapper[4814]: I0122 05:39:57.844071 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 22 05:39:58 crc kubenswrapper[4814]: I0122 05:39:58.855777 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" 
podUID="0e05bab5-c9c8-452b-85be-683346f24176" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.220:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 05:39:58 crc kubenswrapper[4814]: I0122 05:39:58.855795 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="0e05bab5-c9c8-452b-85be-683346f24176" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.220:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 05:40:00 crc kubenswrapper[4814]: I0122 05:40:00.857971 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 05:40:00 crc kubenswrapper[4814]: I0122 05:40:00.859041 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 22 05:40:01 crc kubenswrapper[4814]: I0122 05:40:01.870758 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e16e2647-5f62-4f76-b876-0ad5f8f35f1f" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.221:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 05:40:01 crc kubenswrapper[4814]: I0122 05:40:01.871542 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e16e2647-5f62-4f76-b876-0ad5f8f35f1f" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.221:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 22 05:40:05 crc kubenswrapper[4814]: I0122 05:40:05.928400 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 22 05:40:07 crc kubenswrapper[4814]: I0122 05:40:07.852392 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 22 05:40:07 crc kubenswrapper[4814]: I0122 05:40:07.857136 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 22 05:40:07 crc kubenswrapper[4814]: I0122 05:40:07.860040 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 22 05:40:08 crc kubenswrapper[4814]: I0122 05:40:08.667987 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 22 05:40:10 crc kubenswrapper[4814]: I0122 05:40:10.864813 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 22 05:40:10 crc kubenswrapper[4814]: I0122 05:40:10.865574 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 22 05:40:10 crc kubenswrapper[4814]: I0122 05:40:10.871185 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 22 05:40:10 crc kubenswrapper[4814]: I0122 05:40:10.873282 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 22 05:40:11 crc kubenswrapper[4814]: I0122 05:40:11.687272 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 22 05:40:11 crc kubenswrapper[4814]: I0122 05:40:11.746404 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 22 05:40:19 crc kubenswrapper[4814]: I0122 05:40:19.614488 4814 
patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:40:19 crc kubenswrapper[4814]: I0122 05:40:19.614909 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:40:21 crc kubenswrapper[4814]: I0122 05:40:21.742419 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 05:40:22 crc kubenswrapper[4814]: I0122 05:40:22.887057 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 05:40:26 crc kubenswrapper[4814]: I0122 05:40:26.617731 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="22c14c36-2eb5-424d-a919-25f2e99eeb44" containerName="rabbitmq" containerID="cri-o://08cdcd6761743637a632ae626175a130c736560dbfb3feee844b790b598e0fb5" gracePeriod=604796 Jan 22 05:40:27 crc kubenswrapper[4814]: I0122 05:40:27.606213 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="14a83f70-2b64-417d-a198-d51bb829cea1" containerName="rabbitmq" containerID="cri-o://97f257806b7f3681374dbe001c01a6167cab18ac3f035513fa06dd0adb750034" gracePeriod=604796 Jan 22 05:40:32 crc kubenswrapper[4814]: I0122 05:40:32.899705 4814 generic.go:334] "Generic (PLEG): container finished" podID="22c14c36-2eb5-424d-a919-25f2e99eeb44" containerID="08cdcd6761743637a632ae626175a130c736560dbfb3feee844b790b598e0fb5" exitCode=0 Jan 22 05:40:32 crc kubenswrapper[4814]: I0122 05:40:32.900098 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"22c14c36-2eb5-424d-a919-25f2e99eeb44","Type":"ContainerDied","Data":"08cdcd6761743637a632ae626175a130c736560dbfb3feee844b790b598e0fb5"} Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.169795 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.290746 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-server-conf\") pod \"22c14c36-2eb5-424d-a919-25f2e99eeb44\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.290791 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/22c14c36-2eb5-424d-a919-25f2e99eeb44-pod-info\") pod \"22c14c36-2eb5-424d-a919-25f2e99eeb44\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.290837 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-996wr\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-kube-api-access-996wr\") pod \"22c14c36-2eb5-424d-a919-25f2e99eeb44\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.290907 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-erlang-cookie\") pod \"22c14c36-2eb5-424d-a919-25f2e99eeb44\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.290924 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-tls\") pod \"22c14c36-2eb5-424d-a919-25f2e99eeb44\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.291005 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-plugins\") pod \"22c14c36-2eb5-424d-a919-25f2e99eeb44\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.291027 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-plugins-conf\") pod \"22c14c36-2eb5-424d-a919-25f2e99eeb44\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.291049 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-config-data\") pod \"22c14c36-2eb5-424d-a919-25f2e99eeb44\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.291083 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/22c14c36-2eb5-424d-a919-25f2e99eeb44-erlang-cookie-secret\") pod \"22c14c36-2eb5-424d-a919-25f2e99eeb44\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.291125 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-confd\") pod 
\"22c14c36-2eb5-424d-a919-25f2e99eeb44\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.291164 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"22c14c36-2eb5-424d-a919-25f2e99eeb44\" (UID: \"22c14c36-2eb5-424d-a919-25f2e99eeb44\") " Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.302334 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "22c14c36-2eb5-424d-a919-25f2e99eeb44" (UID: "22c14c36-2eb5-424d-a919-25f2e99eeb44"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.302777 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "22c14c36-2eb5-424d-a919-25f2e99eeb44" (UID: "22c14c36-2eb5-424d-a919-25f2e99eeb44"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.303039 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "22c14c36-2eb5-424d-a919-25f2e99eeb44" (UID: "22c14c36-2eb5-424d-a919-25f2e99eeb44"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.303742 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "22c14c36-2eb5-424d-a919-25f2e99eeb44" (UID: "22c14c36-2eb5-424d-a919-25f2e99eeb44"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.304277 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-kube-api-access-996wr" (OuterVolumeSpecName: "kube-api-access-996wr") pod "22c14c36-2eb5-424d-a919-25f2e99eeb44" (UID: "22c14c36-2eb5-424d-a919-25f2e99eeb44"). InnerVolumeSpecName "kube-api-access-996wr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.305981 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "22c14c36-2eb5-424d-a919-25f2e99eeb44" (UID: "22c14c36-2eb5-424d-a919-25f2e99eeb44"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.311274 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c14c36-2eb5-424d-a919-25f2e99eeb44-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "22c14c36-2eb5-424d-a919-25f2e99eeb44" (UID: "22c14c36-2eb5-424d-a919-25f2e99eeb44"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.316253 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/22c14c36-2eb5-424d-a919-25f2e99eeb44-pod-info" (OuterVolumeSpecName: "pod-info") pod "22c14c36-2eb5-424d-a919-25f2e99eeb44" (UID: "22c14c36-2eb5-424d-a919-25f2e99eeb44"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.335124 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-config-data" (OuterVolumeSpecName: "config-data") pod "22c14c36-2eb5-424d-a919-25f2e99eeb44" (UID: "22c14c36-2eb5-424d-a919-25f2e99eeb44"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.377794 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-server-conf" (OuterVolumeSpecName: "server-conf") pod "22c14c36-2eb5-424d-a919-25f2e99eeb44" (UID: "22c14c36-2eb5-424d-a919-25f2e99eeb44"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.393042 4814 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.393070 4814 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.393078 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.393089 4814 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/22c14c36-2eb5-424d-a919-25f2e99eeb44-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.393113 4814 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.393123 4814 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/22c14c36-2eb5-424d-a919-25f2e99eeb44-server-conf\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.393131 4814 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/22c14c36-2eb5-424d-a919-25f2e99eeb44-pod-info\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.393144 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-996wr\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-kube-api-access-996wr\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.393156 4814 
reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.393167 4814 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.418876 4814 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.456865 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "22c14c36-2eb5-424d-a919-25f2e99eeb44" (UID: "22c14c36-2eb5-424d-a919-25f2e99eeb44"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.495123 4814 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/22c14c36-2eb5-424d-a919-25f2e99eeb44-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.495164 4814 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.929202 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"22c14c36-2eb5-424d-a919-25f2e99eeb44","Type":"ContainerDied","Data":"bcdc77fa9c2fd1b955df4add3c676763ee516ec4baf474ad8b81872b63c01527"} Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.929250 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.929263 4814 scope.go:117] "RemoveContainer" containerID="08cdcd6761743637a632ae626175a130c736560dbfb3feee844b790b598e0fb5" Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.942324 4814 generic.go:334] "Generic (PLEG): container finished" podID="14a83f70-2b64-417d-a198-d51bb829cea1" containerID="97f257806b7f3681374dbe001c01a6167cab18ac3f035513fa06dd0adb750034" exitCode=0 Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.942370 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"14a83f70-2b64-417d-a198-d51bb829cea1","Type":"ContainerDied","Data":"97f257806b7f3681374dbe001c01a6167cab18ac3f035513fa06dd0adb750034"} Jan 22 05:40:33 crc kubenswrapper[4814]: I0122 05:40:33.983247 4814 scope.go:117] "RemoveContainer" containerID="a859b6b6c8244733d3bfd805c35ab89852ef9de42452cfa756a80e1942fce6bc" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.013471 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.040336 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.085936 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 05:40:34 crc kubenswrapper[4814]: E0122 05:40:34.086328 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22c14c36-2eb5-424d-a919-25f2e99eeb44" containerName="setup-container" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.086339 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="22c14c36-2eb5-424d-a919-25f2e99eeb44" containerName="setup-container" Jan 22 05:40:34 crc kubenswrapper[4814]: E0122 05:40:34.086367 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22c14c36-2eb5-424d-a919-25f2e99eeb44" containerName="rabbitmq" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.086373 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="22c14c36-2eb5-424d-a919-25f2e99eeb44" containerName="rabbitmq" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.086542 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="22c14c36-2eb5-424d-a919-25f2e99eeb44" containerName="rabbitmq" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.087450 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.095172 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.096008 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.096122 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.096237 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.096375 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.096526 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-qbc67" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.097932 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.113506 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.219994 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/de0f9aac-bcdb-41fa-952c-0c421486dedc-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.220041 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbjcn\" (UniqueName: \"kubernetes.io/projected/de0f9aac-bcdb-41fa-952c-0c421486dedc-kube-api-access-dbjcn\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.220077 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/de0f9aac-bcdb-41fa-952c-0c421486dedc-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.220109 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/de0f9aac-bcdb-41fa-952c-0c421486dedc-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.220149 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/de0f9aac-bcdb-41fa-952c-0c421486dedc-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.220175 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/de0f9aac-bcdb-41fa-952c-0c421486dedc-server-conf\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.220196 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/de0f9aac-bcdb-41fa-952c-0c421486dedc-pod-info\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.227818 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/de0f9aac-bcdb-41fa-952c-0c421486dedc-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.227848 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/de0f9aac-bcdb-41fa-952c-0c421486dedc-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.227914 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/de0f9aac-bcdb-41fa-952c-0c421486dedc-config-data\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.227949 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.329285 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/de0f9aac-bcdb-41fa-952c-0c421486dedc-config-data\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.329336 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.329375 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/de0f9aac-bcdb-41fa-952c-0c421486dedc-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.329400 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbjcn\" (UniqueName: \"kubernetes.io/projected/de0f9aac-bcdb-41fa-952c-0c421486dedc-kube-api-access-dbjcn\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 
05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.329427 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/de0f9aac-bcdb-41fa-952c-0c421486dedc-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.329453 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/de0f9aac-bcdb-41fa-952c-0c421486dedc-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.329491 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/de0f9aac-bcdb-41fa-952c-0c421486dedc-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.329512 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/de0f9aac-bcdb-41fa-952c-0c421486dedc-server-conf\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.329533 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/de0f9aac-bcdb-41fa-952c-0c421486dedc-pod-info\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.329558 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/de0f9aac-bcdb-41fa-952c-0c421486dedc-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.329575 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/de0f9aac-bcdb-41fa-952c-0c421486dedc-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.330935 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/de0f9aac-bcdb-41fa-952c-0c421486dedc-config-data\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.331195 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.360480 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/de0f9aac-bcdb-41fa-952c-0c421486dedc-rabbitmq-erlang-cookie\") 
pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.373853 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/de0f9aac-bcdb-41fa-952c-0c421486dedc-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.374157 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/de0f9aac-bcdb-41fa-952c-0c421486dedc-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.376127 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/de0f9aac-bcdb-41fa-952c-0c421486dedc-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.377006 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/de0f9aac-bcdb-41fa-952c-0c421486dedc-server-conf\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.379535 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/de0f9aac-bcdb-41fa-952c-0c421486dedc-pod-info\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.380163 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/de0f9aac-bcdb-41fa-952c-0c421486dedc-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.382814 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/de0f9aac-bcdb-41fa-952c-0c421486dedc-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.950563 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbjcn\" (UniqueName: \"kubernetes.io/projected/de0f9aac-bcdb-41fa-952c-0c421486dedc-kube-api-access-dbjcn\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.976118 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c14c36-2eb5-424d-a919-25f2e99eeb44" path="/var/lib/kubelet/pods/22c14c36-2eb5-424d-a919-25f2e99eeb44/volumes" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.978560 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"14a83f70-2b64-417d-a198-d51bb829cea1","Type":"ContainerDied","Data":"ca62004ceebd500208f0b9728b4b066805c819f8709ef3164c876dd5cab34b62"} Jan 22 
05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.978595 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca62004ceebd500208f0b9728b4b066805c819f8709ef3164c876dd5cab34b62" Jan 22 05:40:34 crc kubenswrapper[4814]: I0122 05:40:34.990596 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"de0f9aac-bcdb-41fa-952c-0c421486dedc\") " pod="openstack/rabbitmq-server-0" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.014617 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.030540 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.223143 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-config-data\") pod \"14a83f70-2b64-417d-a198-d51bb829cea1\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.223415 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/14a83f70-2b64-417d-a198-d51bb829cea1-erlang-cookie-secret\") pod \"14a83f70-2b64-417d-a198-d51bb829cea1\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.223458 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-plugins\") pod \"14a83f70-2b64-417d-a198-d51bb829cea1\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.223484 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-erlang-cookie\") pod \"14a83f70-2b64-417d-a198-d51bb829cea1\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.223507 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/14a83f70-2b64-417d-a198-d51bb829cea1-pod-info\") pod \"14a83f70-2b64-417d-a198-d51bb829cea1\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.223524 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-confd\") pod \"14a83f70-2b64-417d-a198-d51bb829cea1\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.223576 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-tls\") pod \"14a83f70-2b64-417d-a198-d51bb829cea1\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.223618 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" 
(UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-server-conf\") pod \"14a83f70-2b64-417d-a198-d51bb829cea1\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.223727 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-plugins-conf\") pod \"14a83f70-2b64-417d-a198-d51bb829cea1\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.223845 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"14a83f70-2b64-417d-a198-d51bb829cea1\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.223864 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jcmmw\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-kube-api-access-jcmmw\") pod \"14a83f70-2b64-417d-a198-d51bb829cea1\" (UID: \"14a83f70-2b64-417d-a198-d51bb829cea1\") " Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.224551 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "14a83f70-2b64-417d-a198-d51bb829cea1" (UID: "14a83f70-2b64-417d-a198-d51bb829cea1"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.225085 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "14a83f70-2b64-417d-a198-d51bb829cea1" (UID: "14a83f70-2b64-417d-a198-d51bb829cea1"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.225430 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "14a83f70-2b64-417d-a198-d51bb829cea1" (UID: "14a83f70-2b64-417d-a198-d51bb829cea1"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.227963 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/14a83f70-2b64-417d-a198-d51bb829cea1-pod-info" (OuterVolumeSpecName: "pod-info") pod "14a83f70-2b64-417d-a198-d51bb829cea1" (UID: "14a83f70-2b64-417d-a198-d51bb829cea1"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.228463 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "persistence") pod "14a83f70-2b64-417d-a198-d51bb829cea1" (UID: "14a83f70-2b64-417d-a198-d51bb829cea1"). InnerVolumeSpecName "local-storage11-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.236916 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "14a83f70-2b64-417d-a198-d51bb829cea1" (UID: "14a83f70-2b64-417d-a198-d51bb829cea1"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.236984 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-kube-api-access-jcmmw" (OuterVolumeSpecName: "kube-api-access-jcmmw") pod "14a83f70-2b64-417d-a198-d51bb829cea1" (UID: "14a83f70-2b64-417d-a198-d51bb829cea1"). InnerVolumeSpecName "kube-api-access-jcmmw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.244284 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14a83f70-2b64-417d-a198-d51bb829cea1-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "14a83f70-2b64-417d-a198-d51bb829cea1" (UID: "14a83f70-2b64-417d-a198-d51bb829cea1"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.255227 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-config-data" (OuterVolumeSpecName: "config-data") pod "14a83f70-2b64-417d-a198-d51bb829cea1" (UID: "14a83f70-2b64-417d-a198-d51bb829cea1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.310698 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-server-conf" (OuterVolumeSpecName: "server-conf") pod "14a83f70-2b64-417d-a198-d51bb829cea1" (UID: "14a83f70-2b64-417d-a198-d51bb829cea1"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.326235 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.326258 4814 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/14a83f70-2b64-417d-a198-d51bb829cea1-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.326270 4814 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.326278 4814 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.326286 4814 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/14a83f70-2b64-417d-a198-d51bb829cea1-pod-info\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.326293 4814 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.326301 4814 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-server-conf\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.326311 4814 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/14a83f70-2b64-417d-a198-d51bb829cea1-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.326336 4814 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.326344 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jcmmw\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-kube-api-access-jcmmw\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.360124 4814 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.370802 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "14a83f70-2b64-417d-a198-d51bb829cea1" (UID: "14a83f70-2b64-417d-a198-d51bb829cea1"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.427633 4814 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.427663 4814 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/14a83f70-2b64-417d-a198-d51bb829cea1-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:35 crc kubenswrapper[4814]: I0122 05:40:35.532138 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 22 05:40:35 crc kubenswrapper[4814]: W0122 05:40:35.539562 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde0f9aac_bcdb_41fa_952c_0c421486dedc.slice/crio-68c7850cdd8a429befb53e7e9d9d2cd30eb5bb3129509773426b8f88fdb29c99 WatchSource:0}: Error finding container 68c7850cdd8a429befb53e7e9d9d2cd30eb5bb3129509773426b8f88fdb29c99: Status 404 returned error can't find the container with id 68c7850cdd8a429befb53e7e9d9d2cd30eb5bb3129509773426b8f88fdb29c99 Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.002355 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"de0f9aac-bcdb-41fa-952c-0c421486dedc","Type":"ContainerStarted","Data":"68c7850cdd8a429befb53e7e9d9d2cd30eb5bb3129509773426b8f88fdb29c99"} Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.002369 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.032184 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.049117 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.061198 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 05:40:36 crc kubenswrapper[4814]: E0122 05:40:36.061609 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14a83f70-2b64-417d-a198-d51bb829cea1" containerName="setup-container" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.061652 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="14a83f70-2b64-417d-a198-d51bb829cea1" containerName="setup-container" Jan 22 05:40:36 crc kubenswrapper[4814]: E0122 05:40:36.061664 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14a83f70-2b64-417d-a198-d51bb829cea1" containerName="rabbitmq" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.061671 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="14a83f70-2b64-417d-a198-d51bb829cea1" containerName="rabbitmq" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.061837 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="14a83f70-2b64-417d-a198-d51bb829cea1" containerName="rabbitmq" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.062972 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.066534 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.066566 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.066885 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.066951 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.067037 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-4g4bt" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.067347 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.068672 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.088113 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.242062 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.242109 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52cm7\" (UniqueName: \"kubernetes.io/projected/229a7e07-faf2-47ae-b9c2-b419ebae805f-kube-api-access-52cm7\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.242154 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/229a7e07-faf2-47ae-b9c2-b419ebae805f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.242174 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/229a7e07-faf2-47ae-b9c2-b419ebae805f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.242196 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/229a7e07-faf2-47ae-b9c2-b419ebae805f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.242213 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/229a7e07-faf2-47ae-b9c2-b419ebae805f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.242240 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/229a7e07-faf2-47ae-b9c2-b419ebae805f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.242263 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/229a7e07-faf2-47ae-b9c2-b419ebae805f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.242309 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/229a7e07-faf2-47ae-b9c2-b419ebae805f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.242347 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/229a7e07-faf2-47ae-b9c2-b419ebae805f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.242374 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/229a7e07-faf2-47ae-b9c2-b419ebae805f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.344585 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/229a7e07-faf2-47ae-b9c2-b419ebae805f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.345180 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/229a7e07-faf2-47ae-b9c2-b419ebae805f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.345219 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/229a7e07-faf2-47ae-b9c2-b419ebae805f-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.345249 4814 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.345270 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52cm7\" (UniqueName: \"kubernetes.io/projected/229a7e07-faf2-47ae-b9c2-b419ebae805f-kube-api-access-52cm7\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.345309 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/229a7e07-faf2-47ae-b9c2-b419ebae805f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.345325 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/229a7e07-faf2-47ae-b9c2-b419ebae805f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.345347 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/229a7e07-faf2-47ae-b9c2-b419ebae805f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.345362 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/229a7e07-faf2-47ae-b9c2-b419ebae805f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.345387 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/229a7e07-faf2-47ae-b9c2-b419ebae805f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.345405 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/229a7e07-faf2-47ae-b9c2-b419ebae805f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.345462 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.345702 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/229a7e07-faf2-47ae-b9c2-b419ebae805f-rabbitmq-erlang-cookie\") 
pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.346248 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/229a7e07-faf2-47ae-b9c2-b419ebae805f-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.346483 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/229a7e07-faf2-47ae-b9c2-b419ebae805f-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.347852 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/229a7e07-faf2-47ae-b9c2-b419ebae805f-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.348232 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/229a7e07-faf2-47ae-b9c2-b419ebae805f-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.352064 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/229a7e07-faf2-47ae-b9c2-b419ebae805f-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.353350 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/229a7e07-faf2-47ae-b9c2-b419ebae805f-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.353861 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/229a7e07-faf2-47ae-b9c2-b419ebae805f-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.357559 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14a83f70-2b64-417d-a198-d51bb829cea1" path="/var/lib/kubelet/pods/14a83f70-2b64-417d-a198-d51bb829cea1/volumes" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.364102 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52cm7\" (UniqueName: \"kubernetes.io/projected/229a7e07-faf2-47ae-b9c2-b419ebae805f-kube-api-access-52cm7\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.372546 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/229a7e07-faf2-47ae-b9c2-b419ebae805f-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.492302 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"229a7e07-faf2-47ae-b9c2-b419ebae805f\") " pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:36 crc kubenswrapper[4814]: I0122 05:40:36.678653 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:40:37 crc kubenswrapper[4814]: I0122 05:40:37.014382 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"de0f9aac-bcdb-41fa-952c-0c421486dedc","Type":"ContainerStarted","Data":"6636ec06976f4bdc3a1d6770cfd95c6e4bf75c732dff75e9d1fe8079f925a0ca"} Jan 22 05:40:37 crc kubenswrapper[4814]: W0122 05:40:37.114923 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod229a7e07_faf2_47ae_b9c2_b419ebae805f.slice/crio-a0f49218070232d8b6ff1fe6578d5d67d76005fd80f8c756c40edf30dc8c4588 WatchSource:0}: Error finding container a0f49218070232d8b6ff1fe6578d5d67d76005fd80f8c756c40edf30dc8c4588: Status 404 returned error can't find the container with id a0f49218070232d8b6ff1fe6578d5d67d76005fd80f8c756c40edf30dc8c4588 Jan 22 05:40:37 crc kubenswrapper[4814]: I0122 05:40:37.115304 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.024860 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"229a7e07-faf2-47ae-b9c2-b419ebae805f","Type":"ContainerStarted","Data":"a0f49218070232d8b6ff1fe6578d5d67d76005fd80f8c756c40edf30dc8c4588"} Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.457507 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68df85789f-mhpp5"] Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.458899 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.460791 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.469066 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-mhpp5"] Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.601939 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.602219 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-config\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.602268 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.602369 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.602486 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-dns-svc\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.602561 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fp7m\" (UniqueName: \"kubernetes.io/projected/6bd07c23-9711-4ba2-86ad-f921d0834712-kube-api-access-2fp7m\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.602667 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.704408 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-dns-svc\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: 
\"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.704477 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fp7m\" (UniqueName: \"kubernetes.io/projected/6bd07c23-9711-4ba2-86ad-f921d0834712-kube-api-access-2fp7m\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.704553 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.704611 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.704691 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-config\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.704730 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.704787 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.705545 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.705670 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-config\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.705824 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " 
pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.706226 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.706372 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-dns-svc\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.706539 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.727591 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fp7m\" (UniqueName: \"kubernetes.io/projected/6bd07c23-9711-4ba2-86ad-f921d0834712-kube-api-access-2fp7m\") pod \"dnsmasq-dns-68df85789f-mhpp5\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:38 crc kubenswrapper[4814]: I0122 05:40:38.774590 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:39 crc kubenswrapper[4814]: I0122 05:40:39.036146 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"229a7e07-faf2-47ae-b9c2-b419ebae805f","Type":"ContainerStarted","Data":"aa88df98067b324ca7a4006e8a8f9421c64fc7c51332a7ac6e95573f4a57e3dd"} Jan 22 05:40:39 crc kubenswrapper[4814]: I0122 05:40:39.248536 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-mhpp5"] Jan 22 05:40:40 crc kubenswrapper[4814]: I0122 05:40:40.044838 4814 generic.go:334] "Generic (PLEG): container finished" podID="6bd07c23-9711-4ba2-86ad-f921d0834712" containerID="c6b25b14c481c1e8328cae60fd405c5ba59fa1dec2ce8d3bce41b363e67e8344" exitCode=0 Jan 22 05:40:40 crc kubenswrapper[4814]: I0122 05:40:40.045057 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-mhpp5" event={"ID":"6bd07c23-9711-4ba2-86ad-f921d0834712","Type":"ContainerDied","Data":"c6b25b14c481c1e8328cae60fd405c5ba59fa1dec2ce8d3bce41b363e67e8344"} Jan 22 05:40:40 crc kubenswrapper[4814]: I0122 05:40:40.045120 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-mhpp5" event={"ID":"6bd07c23-9711-4ba2-86ad-f921d0834712","Type":"ContainerStarted","Data":"dd264a797c74505c8ffd67957eb055055e543c9ae72b64b3673d23b6605e40bb"} Jan 22 05:40:41 crc kubenswrapper[4814]: I0122 05:40:41.055758 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-mhpp5" event={"ID":"6bd07c23-9711-4ba2-86ad-f921d0834712","Type":"ContainerStarted","Data":"a588c2cf6403990c7b22a36fea50a18105b83f3b02c609c574192c4f8db3e63e"} Jan 22 05:40:41 crc kubenswrapper[4814]: I0122 05:40:41.056639 4814 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:41 crc kubenswrapper[4814]: I0122 05:40:41.081428 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68df85789f-mhpp5" podStartSLOduration=3.081410252 podStartE2EDuration="3.081410252s" podCreationTimestamp="2026-01-22 05:40:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:40:41.071476744 +0000 UTC m=+1327.154964959" watchObservedRunningTime="2026-01-22 05:40:41.081410252 +0000 UTC m=+1327.164898477" Jan 22 05:40:48 crc kubenswrapper[4814]: I0122 05:40:48.776937 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:40:48 crc kubenswrapper[4814]: I0122 05:40:48.851382 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-79fbn"] Jan 22 05:40:48 crc kubenswrapper[4814]: I0122 05:40:48.851655 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn" podUID="b575a98c-ea17-4a9a-b796-d54720b31dfa" containerName="dnsmasq-dns" containerID="cri-o://b59c5c88b85597c6d0ded98b237871ae9f80196b2071bef48b22795f20cfc06f" gracePeriod=10 Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.088990 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc"] Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.090440 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.114417 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc"] Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.173205 4814 generic.go:334] "Generic (PLEG): container finished" podID="b575a98c-ea17-4a9a-b796-d54720b31dfa" containerID="b59c5c88b85597c6d0ded98b237871ae9f80196b2071bef48b22795f20cfc06f" exitCode=0 Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.173257 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn" event={"ID":"b575a98c-ea17-4a9a-b796-d54720b31dfa","Type":"ContainerDied","Data":"b59c5c88b85597c6d0ded98b237871ae9f80196b2071bef48b22795f20cfc06f"} Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.233057 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-dns-swift-storage-0\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.233319 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-ovsdbserver-sb\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.233339 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-ovsdbserver-nb\") pod 
\"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.233454 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-config\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.233482 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-openstack-edpm-ipam\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.233497 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qk2hq\" (UniqueName: \"kubernetes.io/projected/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-kube-api-access-qk2hq\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.233532 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-dns-svc\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.334789 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-dns-swift-storage-0\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.334842 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-ovsdbserver-sb\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.334866 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-ovsdbserver-nb\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.334990 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-config\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.335029 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-openstack-edpm-ipam\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.335060 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qk2hq\" (UniqueName: \"kubernetes.io/projected/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-kube-api-access-qk2hq\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.335102 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-dns-svc\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.336170 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-ovsdbserver-nb\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.336231 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-dns-svc\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.336780 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-dns-swift-storage-0\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.336912 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-config\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.337362 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-ovsdbserver-sb\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.337682 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-openstack-edpm-ipam\") pod \"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.358962 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qk2hq\" (UniqueName: \"kubernetes.io/projected/23aa41bd-c155-4c2f-b05e-a0668d89f9d5-kube-api-access-qk2hq\") pod 
\"dnsmasq-dns-7f8ff5ffbc-vm6jc\" (UID: \"23aa41bd-c155-4c2f-b05e-a0668d89f9d5\") " pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.424329 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.580708 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.619220 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.619263 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.677398 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-dns-swift-storage-0\") pod \"b575a98c-ea17-4a9a-b796-d54720b31dfa\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.679130 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcwfl\" (UniqueName: \"kubernetes.io/projected/b575a98c-ea17-4a9a-b796-d54720b31dfa-kube-api-access-dcwfl\") pod \"b575a98c-ea17-4a9a-b796-d54720b31dfa\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.679308 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-ovsdbserver-nb\") pod \"b575a98c-ea17-4a9a-b796-d54720b31dfa\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.679702 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-config\") pod \"b575a98c-ea17-4a9a-b796-d54720b31dfa\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.679784 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-dns-svc\") pod \"b575a98c-ea17-4a9a-b796-d54720b31dfa\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.679950 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-ovsdbserver-sb\") pod \"b575a98c-ea17-4a9a-b796-d54720b31dfa\" (UID: \"b575a98c-ea17-4a9a-b796-d54720b31dfa\") " Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.693711 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/b575a98c-ea17-4a9a-b796-d54720b31dfa-kube-api-access-dcwfl" (OuterVolumeSpecName: "kube-api-access-dcwfl") pod "b575a98c-ea17-4a9a-b796-d54720b31dfa" (UID: "b575a98c-ea17-4a9a-b796-d54720b31dfa"). InnerVolumeSpecName "kube-api-access-dcwfl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.745113 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-config" (OuterVolumeSpecName: "config") pod "b575a98c-ea17-4a9a-b796-d54720b31dfa" (UID: "b575a98c-ea17-4a9a-b796-d54720b31dfa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.777238 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b575a98c-ea17-4a9a-b796-d54720b31dfa" (UID: "b575a98c-ea17-4a9a-b796-d54720b31dfa"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.785733 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b575a98c-ea17-4a9a-b796-d54720b31dfa" (UID: "b575a98c-ea17-4a9a-b796-d54720b31dfa"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.786215 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.786283 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.786334 4814 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.786385 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcwfl\" (UniqueName: \"kubernetes.io/projected/b575a98c-ea17-4a9a-b796-d54720b31dfa-kube-api-access-dcwfl\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.789886 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b575a98c-ea17-4a9a-b796-d54720b31dfa" (UID: "b575a98c-ea17-4a9a-b796-d54720b31dfa"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.794887 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b575a98c-ea17-4a9a-b796-d54720b31dfa" (UID: "b575a98c-ea17-4a9a-b796-d54720b31dfa"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.888076 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.888108 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b575a98c-ea17-4a9a-b796-d54720b31dfa-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 05:40:49 crc kubenswrapper[4814]: I0122 05:40:49.942596 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc"] Jan 22 05:40:50 crc kubenswrapper[4814]: I0122 05:40:50.182553 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn" Jan 22 05:40:50 crc kubenswrapper[4814]: I0122 05:40:50.182608 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-79fbn" event={"ID":"b575a98c-ea17-4a9a-b796-d54720b31dfa","Type":"ContainerDied","Data":"9d3b8a8c64899afe7090a7996942db6d26fa44a7db2f7fb0fc15a7b9fc642d9e"} Jan 22 05:40:50 crc kubenswrapper[4814]: I0122 05:40:50.182994 4814 scope.go:117] "RemoveContainer" containerID="b59c5c88b85597c6d0ded98b237871ae9f80196b2071bef48b22795f20cfc06f" Jan 22 05:40:50 crc kubenswrapper[4814]: I0122 05:40:50.184057 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" event={"ID":"23aa41bd-c155-4c2f-b05e-a0668d89f9d5","Type":"ContainerStarted","Data":"5eac8532b42471e20898b15597d768ea569680f5fab9c102f9c23f4104bc98ea"} Jan 22 05:40:50 crc kubenswrapper[4814]: I0122 05:40:50.219842 4814 scope.go:117] "RemoveContainer" containerID="493b0c05d79200619d9adc7aa3582e162e8a72132d4ecd57c46088f6928be9b4" Jan 22 05:40:50 crc kubenswrapper[4814]: I0122 05:40:50.225991 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-79fbn"] Jan 22 05:40:50 crc kubenswrapper[4814]: I0122 05:40:50.234067 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-79fbn"] Jan 22 05:40:50 crc kubenswrapper[4814]: I0122 05:40:50.357314 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b575a98c-ea17-4a9a-b796-d54720b31dfa" path="/var/lib/kubelet/pods/b575a98c-ea17-4a9a-b796-d54720b31dfa/volumes" Jan 22 05:40:51 crc kubenswrapper[4814]: I0122 05:40:51.196812 4814 generic.go:334] "Generic (PLEG): container finished" podID="23aa41bd-c155-4c2f-b05e-a0668d89f9d5" containerID="6911b0af5febb023596892cbf42798d3bbe5bd4bb480cdb202e2d13daf70911d" exitCode=0 Jan 22 05:40:51 crc kubenswrapper[4814]: I0122 05:40:51.196921 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" event={"ID":"23aa41bd-c155-4c2f-b05e-a0668d89f9d5","Type":"ContainerDied","Data":"6911b0af5febb023596892cbf42798d3bbe5bd4bb480cdb202e2d13daf70911d"} Jan 22 05:40:52 crc kubenswrapper[4814]: I0122 05:40:52.220374 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" event={"ID":"23aa41bd-c155-4c2f-b05e-a0668d89f9d5","Type":"ContainerStarted","Data":"2de7218023d486331f713a087e1a515cbc4efb0f1c7fa288fb28bddbc3ce578e"} Jan 22 05:40:52 crc kubenswrapper[4814]: I0122 05:40:52.221098 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:52 crc kubenswrapper[4814]: I0122 05:40:52.252445 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" podStartSLOduration=3.252411954 podStartE2EDuration="3.252411954s" podCreationTimestamp="2026-01-22 05:40:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:40:52.247085499 +0000 UTC m=+1338.330573744" watchObservedRunningTime="2026-01-22 05:40:52.252411954 +0000 UTC m=+1338.335900209" Jan 22 05:40:59 crc kubenswrapper[4814]: I0122 05:40:59.427734 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7f8ff5ffbc-vm6jc" Jan 22 05:40:59 crc kubenswrapper[4814]: I0122 05:40:59.492611 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-mhpp5"] Jan 22 05:40:59 crc kubenswrapper[4814]: I0122 05:40:59.500992 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-68df85789f-mhpp5" podUID="6bd07c23-9711-4ba2-86ad-f921d0834712" containerName="dnsmasq-dns" containerID="cri-o://a588c2cf6403990c7b22a36fea50a18105b83f3b02c609c574192c4f8db3e63e" gracePeriod=10 Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.004864 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.144347 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-config\") pod \"6bd07c23-9711-4ba2-86ad-f921d0834712\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.144416 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-dns-svc\") pod \"6bd07c23-9711-4ba2-86ad-f921d0834712\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.144449 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-ovsdbserver-nb\") pod \"6bd07c23-9711-4ba2-86ad-f921d0834712\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.144506 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-dns-swift-storage-0\") pod \"6bd07c23-9711-4ba2-86ad-f921d0834712\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.144543 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fp7m\" (UniqueName: \"kubernetes.io/projected/6bd07c23-9711-4ba2-86ad-f921d0834712-kube-api-access-2fp7m\") pod \"6bd07c23-9711-4ba2-86ad-f921d0834712\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.144596 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-openstack-edpm-ipam\") pod 
\"6bd07c23-9711-4ba2-86ad-f921d0834712\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.144648 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-ovsdbserver-sb\") pod \"6bd07c23-9711-4ba2-86ad-f921d0834712\" (UID: \"6bd07c23-9711-4ba2-86ad-f921d0834712\") " Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.167290 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bd07c23-9711-4ba2-86ad-f921d0834712-kube-api-access-2fp7m" (OuterVolumeSpecName: "kube-api-access-2fp7m") pod "6bd07c23-9711-4ba2-86ad-f921d0834712" (UID: "6bd07c23-9711-4ba2-86ad-f921d0834712"). InnerVolumeSpecName "kube-api-access-2fp7m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.206578 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6bd07c23-9711-4ba2-86ad-f921d0834712" (UID: "6bd07c23-9711-4ba2-86ad-f921d0834712"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.222733 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6bd07c23-9711-4ba2-86ad-f921d0834712" (UID: "6bd07c23-9711-4ba2-86ad-f921d0834712"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.232743 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6bd07c23-9711-4ba2-86ad-f921d0834712" (UID: "6bd07c23-9711-4ba2-86ad-f921d0834712"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.233581 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-config" (OuterVolumeSpecName: "config") pod "6bd07c23-9711-4ba2-86ad-f921d0834712" (UID: "6bd07c23-9711-4ba2-86ad-f921d0834712"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.246443 4814 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.246475 4814 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.246487 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fp7m\" (UniqueName: \"kubernetes.io/projected/6bd07c23-9711-4ba2-86ad-f921d0834712-kube-api-access-2fp7m\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.246495 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.246503 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-config\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.255522 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6bd07c23-9711-4ba2-86ad-f921d0834712" (UID: "6bd07c23-9711-4ba2-86ad-f921d0834712"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.257153 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "6bd07c23-9711-4ba2-86ad-f921d0834712" (UID: "6bd07c23-9711-4ba2-86ad-f921d0834712"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.315512 4814 generic.go:334] "Generic (PLEG): container finished" podID="6bd07c23-9711-4ba2-86ad-f921d0834712" containerID="a588c2cf6403990c7b22a36fea50a18105b83f3b02c609c574192c4f8db3e63e" exitCode=0 Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.315562 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-mhpp5" event={"ID":"6bd07c23-9711-4ba2-86ad-f921d0834712","Type":"ContainerDied","Data":"a588c2cf6403990c7b22a36fea50a18105b83f3b02c609c574192c4f8db3e63e"} Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.315571 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-mhpp5" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.315589 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-mhpp5" event={"ID":"6bd07c23-9711-4ba2-86ad-f921d0834712","Type":"ContainerDied","Data":"dd264a797c74505c8ffd67957eb055055e543c9ae72b64b3673d23b6605e40bb"} Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.315606 4814 scope.go:117] "RemoveContainer" containerID="a588c2cf6403990c7b22a36fea50a18105b83f3b02c609c574192c4f8db3e63e" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.342855 4814 scope.go:117] "RemoveContainer" containerID="c6b25b14c481c1e8328cae60fd405c5ba59fa1dec2ce8d3bce41b363e67e8344" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.347674 4814 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.347698 4814 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/6bd07c23-9711-4ba2-86ad-f921d0834712-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.353589 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-mhpp5"] Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.357164 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-mhpp5"] Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.363588 4814 scope.go:117] "RemoveContainer" containerID="a588c2cf6403990c7b22a36fea50a18105b83f3b02c609c574192c4f8db3e63e" Jan 22 05:41:00 crc kubenswrapper[4814]: E0122 05:41:00.364011 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a588c2cf6403990c7b22a36fea50a18105b83f3b02c609c574192c4f8db3e63e\": container with ID starting with a588c2cf6403990c7b22a36fea50a18105b83f3b02c609c574192c4f8db3e63e not found: ID does not exist" containerID="a588c2cf6403990c7b22a36fea50a18105b83f3b02c609c574192c4f8db3e63e" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.364064 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a588c2cf6403990c7b22a36fea50a18105b83f3b02c609c574192c4f8db3e63e"} err="failed to get container status \"a588c2cf6403990c7b22a36fea50a18105b83f3b02c609c574192c4f8db3e63e\": rpc error: code = NotFound desc = could not find container \"a588c2cf6403990c7b22a36fea50a18105b83f3b02c609c574192c4f8db3e63e\": container with ID starting with a588c2cf6403990c7b22a36fea50a18105b83f3b02c609c574192c4f8db3e63e not found: ID does not exist" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 05:41:00.364094 4814 scope.go:117] "RemoveContainer" containerID="c6b25b14c481c1e8328cae60fd405c5ba59fa1dec2ce8d3bce41b363e67e8344" Jan 22 05:41:00 crc kubenswrapper[4814]: E0122 05:41:00.364418 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6b25b14c481c1e8328cae60fd405c5ba59fa1dec2ce8d3bce41b363e67e8344\": container with ID starting with c6b25b14c481c1e8328cae60fd405c5ba59fa1dec2ce8d3bce41b363e67e8344 not found: ID does not exist" containerID="c6b25b14c481c1e8328cae60fd405c5ba59fa1dec2ce8d3bce41b363e67e8344" Jan 22 05:41:00 crc kubenswrapper[4814]: I0122 
05:41:00.364454 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6b25b14c481c1e8328cae60fd405c5ba59fa1dec2ce8d3bce41b363e67e8344"} err="failed to get container status \"c6b25b14c481c1e8328cae60fd405c5ba59fa1dec2ce8d3bce41b363e67e8344\": rpc error: code = NotFound desc = could not find container \"c6b25b14c481c1e8328cae60fd405c5ba59fa1dec2ce8d3bce41b363e67e8344\": container with ID starting with c6b25b14c481c1e8328cae60fd405c5ba59fa1dec2ce8d3bce41b363e67e8344 not found: ID does not exist" Jan 22 05:41:02 crc kubenswrapper[4814]: I0122 05:41:02.355236 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bd07c23-9711-4ba2-86ad-f921d0834712" path="/var/lib/kubelet/pods/6bd07c23-9711-4ba2-86ad-f921d0834712/volumes" Jan 22 05:41:08 crc kubenswrapper[4814]: E0122 05:41:08.897736 4814 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde0f9aac_bcdb_41fa_952c_0c421486dedc.slice/crio-conmon-6636ec06976f4bdc3a1d6770cfd95c6e4bf75c732dff75e9d1fe8079f925a0ca.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde0f9aac_bcdb_41fa_952c_0c421486dedc.slice/crio-6636ec06976f4bdc3a1d6770cfd95c6e4bf75c732dff75e9d1fe8079f925a0ca.scope\": RecentStats: unable to find data in memory cache]" Jan 22 05:41:09 crc kubenswrapper[4814]: I0122 05:41:09.433715 4814 generic.go:334] "Generic (PLEG): container finished" podID="de0f9aac-bcdb-41fa-952c-0c421486dedc" containerID="6636ec06976f4bdc3a1d6770cfd95c6e4bf75c732dff75e9d1fe8079f925a0ca" exitCode=0 Jan 22 05:41:09 crc kubenswrapper[4814]: I0122 05:41:09.433813 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"de0f9aac-bcdb-41fa-952c-0c421486dedc","Type":"ContainerDied","Data":"6636ec06976f4bdc3a1d6770cfd95c6e4bf75c732dff75e9d1fe8079f925a0ca"} Jan 22 05:41:10 crc kubenswrapper[4814]: I0122 05:41:10.444966 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"de0f9aac-bcdb-41fa-952c-0c421486dedc","Type":"ContainerStarted","Data":"378aea2a514b20dce34abe5374a75be3aba75e853e58e843e578006d079c7fb4"} Jan 22 05:41:10 crc kubenswrapper[4814]: I0122 05:41:10.445547 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 22 05:41:10 crc kubenswrapper[4814]: I0122 05:41:10.447584 4814 generic.go:334] "Generic (PLEG): container finished" podID="229a7e07-faf2-47ae-b9c2-b419ebae805f" containerID="aa88df98067b324ca7a4006e8a8f9421c64fc7c51332a7ac6e95573f4a57e3dd" exitCode=0 Jan 22 05:41:10 crc kubenswrapper[4814]: I0122 05:41:10.447671 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"229a7e07-faf2-47ae-b9c2-b419ebae805f","Type":"ContainerDied","Data":"aa88df98067b324ca7a4006e8a8f9421c64fc7c51332a7ac6e95573f4a57e3dd"} Jan 22 05:41:10 crc kubenswrapper[4814]: I0122 05:41:10.476451 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.476434389 podStartE2EDuration="37.476434389s" podCreationTimestamp="2026-01-22 05:40:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:41:10.469993189 +0000 UTC m=+1356.553481414" 
watchObservedRunningTime="2026-01-22 05:41:10.476434389 +0000 UTC m=+1356.559922604" Jan 22 05:41:11 crc kubenswrapper[4814]: I0122 05:41:11.460813 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"229a7e07-faf2-47ae-b9c2-b419ebae805f","Type":"ContainerStarted","Data":"1f79c6b5a35139891ca36443f4784188880e242ae1ad2523371867709ea8f356"} Jan 22 05:41:11 crc kubenswrapper[4814]: I0122 05:41:11.461826 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:41:11 crc kubenswrapper[4814]: I0122 05:41:11.491200 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=35.491185208 podStartE2EDuration="35.491185208s" podCreationTimestamp="2026-01-22 05:40:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 05:41:11.486448961 +0000 UTC m=+1357.569937186" watchObservedRunningTime="2026-01-22 05:41:11.491185208 +0000 UTC m=+1357.574673423" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.316141 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm"] Jan 22 05:41:13 crc kubenswrapper[4814]: E0122 05:41:13.317439 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b575a98c-ea17-4a9a-b796-d54720b31dfa" containerName="init" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.317507 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="b575a98c-ea17-4a9a-b796-d54720b31dfa" containerName="init" Jan 22 05:41:13 crc kubenswrapper[4814]: E0122 05:41:13.317569 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bd07c23-9711-4ba2-86ad-f921d0834712" containerName="dnsmasq-dns" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.317641 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bd07c23-9711-4ba2-86ad-f921d0834712" containerName="dnsmasq-dns" Jan 22 05:41:13 crc kubenswrapper[4814]: E0122 05:41:13.317711 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b575a98c-ea17-4a9a-b796-d54720b31dfa" containerName="dnsmasq-dns" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.317763 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="b575a98c-ea17-4a9a-b796-d54720b31dfa" containerName="dnsmasq-dns" Jan 22 05:41:13 crc kubenswrapper[4814]: E0122 05:41:13.317824 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bd07c23-9711-4ba2-86ad-f921d0834712" containerName="init" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.317874 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bd07c23-9711-4ba2-86ad-f921d0834712" containerName="init" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.318103 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="b575a98c-ea17-4a9a-b796-d54720b31dfa" containerName="dnsmasq-dns" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.318168 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bd07c23-9711-4ba2-86ad-f921d0834712" containerName="dnsmasq-dns" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.318871 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.320872 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.321413 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.321762 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.373134 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.374744 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm"] Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.413840 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.413903 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsvlg\" (UniqueName: \"kubernetes.io/projected/7aef7905-6f63-4b1c-92b8-d2687d5affb1-kube-api-access-tsvlg\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.413995 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.414024 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.516202 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.516282 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.516460 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.516533 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsvlg\" (UniqueName: \"kubernetes.io/projected/7aef7905-6f63-4b1c-92b8-d2687d5affb1-kube-api-access-tsvlg\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.521850 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.525169 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.525423 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.533583 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsvlg\" (UniqueName: \"kubernetes.io/projected/7aef7905-6f63-4b1c-92b8-d2687d5affb1-kube-api-access-tsvlg\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:13 crc kubenswrapper[4814]: I0122 05:41:13.714740 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:14 crc kubenswrapper[4814]: I0122 05:41:14.324960 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm"] Jan 22 05:41:14 crc kubenswrapper[4814]: I0122 05:41:14.487218 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" event={"ID":"7aef7905-6f63-4b1c-92b8-d2687d5affb1","Type":"ContainerStarted","Data":"f8803f743b2f3794c1d53e72790df9ac4c426bf7d0984490b2568a2090226899"} Jan 22 05:41:19 crc kubenswrapper[4814]: I0122 05:41:19.613777 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:41:19 crc kubenswrapper[4814]: I0122 05:41:19.614105 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:41:19 crc kubenswrapper[4814]: I0122 05:41:19.614384 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:41:19 crc kubenswrapper[4814]: I0122 05:41:19.615152 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"47eea733882c66d487823fb004595bb5b74593750bd6730a1b625e73c2be11e0"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 05:41:19 crc kubenswrapper[4814]: I0122 05:41:19.615214 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://47eea733882c66d487823fb004595bb5b74593750bd6730a1b625e73c2be11e0" gracePeriod=600 Jan 22 05:41:20 crc kubenswrapper[4814]: I0122 05:41:20.543844 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="47eea733882c66d487823fb004595bb5b74593750bd6730a1b625e73c2be11e0" exitCode=0 Jan 22 05:41:20 crc kubenswrapper[4814]: I0122 05:41:20.544070 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"47eea733882c66d487823fb004595bb5b74593750bd6730a1b625e73c2be11e0"} Jan 22 05:41:20 crc kubenswrapper[4814]: I0122 05:41:20.544100 4814 scope.go:117] "RemoveContainer" containerID="4cc634dfae0a47901cc979ba5b63d3858a39aa8e9b0382a2430471166dd22de7" Jan 22 05:41:25 crc kubenswrapper[4814]: I0122 05:41:25.017813 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 22 05:41:25 crc kubenswrapper[4814]: I0122 05:41:25.599244 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" 
event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683"} Jan 22 05:41:25 crc kubenswrapper[4814]: I0122 05:41:25.602349 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" event={"ID":"7aef7905-6f63-4b1c-92b8-d2687d5affb1","Type":"ContainerStarted","Data":"6f4458194a40b7fdb3a88d6b46f9dcf32c1ee7f52f4051816502baee264cb771"} Jan 22 05:41:25 crc kubenswrapper[4814]: I0122 05:41:25.639112 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" podStartSLOduration=2.195097156 podStartE2EDuration="12.639096267s" podCreationTimestamp="2026-01-22 05:41:13 +0000 UTC" firstStartedPulling="2026-01-22 05:41:14.323175339 +0000 UTC m=+1360.406663554" lastFinishedPulling="2026-01-22 05:41:24.76717444 +0000 UTC m=+1370.850662665" observedRunningTime="2026-01-22 05:41:25.628295051 +0000 UTC m=+1371.711783266" watchObservedRunningTime="2026-01-22 05:41:25.639096267 +0000 UTC m=+1371.722584482" Jan 22 05:41:26 crc kubenswrapper[4814]: I0122 05:41:26.682821 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 22 05:41:37 crc kubenswrapper[4814]: I0122 05:41:37.729710 4814 generic.go:334] "Generic (PLEG): container finished" podID="7aef7905-6f63-4b1c-92b8-d2687d5affb1" containerID="6f4458194a40b7fdb3a88d6b46f9dcf32c1ee7f52f4051816502baee264cb771" exitCode=0 Jan 22 05:41:37 crc kubenswrapper[4814]: I0122 05:41:37.729825 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" event={"ID":"7aef7905-6f63-4b1c-92b8-d2687d5affb1","Type":"ContainerDied","Data":"6f4458194a40b7fdb3a88d6b46f9dcf32c1ee7f52f4051816502baee264cb771"} Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.187889 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.224655 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsvlg\" (UniqueName: \"kubernetes.io/projected/7aef7905-6f63-4b1c-92b8-d2687d5affb1-kube-api-access-tsvlg\") pod \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.224747 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-repo-setup-combined-ca-bundle\") pod \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.224899 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-inventory\") pod \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.225004 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-ssh-key-openstack-edpm-ipam\") pod \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\" (UID: \"7aef7905-6f63-4b1c-92b8-d2687d5affb1\") " Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.244822 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7aef7905-6f63-4b1c-92b8-d2687d5affb1-kube-api-access-tsvlg" (OuterVolumeSpecName: "kube-api-access-tsvlg") pod "7aef7905-6f63-4b1c-92b8-d2687d5affb1" (UID: "7aef7905-6f63-4b1c-92b8-d2687d5affb1"). InnerVolumeSpecName "kube-api-access-tsvlg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.259244 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "7aef7905-6f63-4b1c-92b8-d2687d5affb1" (UID: "7aef7905-6f63-4b1c-92b8-d2687d5affb1"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.273563 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-inventory" (OuterVolumeSpecName: "inventory") pod "7aef7905-6f63-4b1c-92b8-d2687d5affb1" (UID: "7aef7905-6f63-4b1c-92b8-d2687d5affb1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.279693 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "7aef7905-6f63-4b1c-92b8-d2687d5affb1" (UID: "7aef7905-6f63-4b1c-92b8-d2687d5affb1"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.327514 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.327775 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.327785 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsvlg\" (UniqueName: \"kubernetes.io/projected/7aef7905-6f63-4b1c-92b8-d2687d5affb1-kube-api-access-tsvlg\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.327795 4814 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7aef7905-6f63-4b1c-92b8-d2687d5affb1-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.750087 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" event={"ID":"7aef7905-6f63-4b1c-92b8-d2687d5affb1","Type":"ContainerDied","Data":"f8803f743b2f3794c1d53e72790df9ac4c426bf7d0984490b2568a2090226899"} Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.750140 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8803f743b2f3794c1d53e72790df9ac4c426bf7d0984490b2568a2090226899" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.750140 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-5q5fm" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.921591 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt"] Jan 22 05:41:39 crc kubenswrapper[4814]: E0122 05:41:39.922323 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7aef7905-6f63-4b1c-92b8-d2687d5affb1" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.922339 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7aef7905-6f63-4b1c-92b8-d2687d5affb1" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.922669 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="7aef7905-6f63-4b1c-92b8-d2687d5affb1" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.923496 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.928313 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.928481 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.928636 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.928770 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:41:39 crc kubenswrapper[4814]: I0122 05:41:39.973496 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt"] Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.061735 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mqmn\" (UniqueName: \"kubernetes.io/projected/095acb56-0517-4287-b7ef-6a000b255cb7-kube-api-access-2mqmn\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-5lhpt\" (UID: \"095acb56-0517-4287-b7ef-6a000b255cb7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.062291 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/095acb56-0517-4287-b7ef-6a000b255cb7-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-5lhpt\" (UID: \"095acb56-0517-4287-b7ef-6a000b255cb7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.062452 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/095acb56-0517-4287-b7ef-6a000b255cb7-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-5lhpt\" (UID: \"095acb56-0517-4287-b7ef-6a000b255cb7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.163831 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/095acb56-0517-4287-b7ef-6a000b255cb7-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-5lhpt\" (UID: \"095acb56-0517-4287-b7ef-6a000b255cb7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.163982 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mqmn\" (UniqueName: \"kubernetes.io/projected/095acb56-0517-4287-b7ef-6a000b255cb7-kube-api-access-2mqmn\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-5lhpt\" (UID: \"095acb56-0517-4287-b7ef-6a000b255cb7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.164043 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/095acb56-0517-4287-b7ef-6a000b255cb7-ssh-key-openstack-edpm-ipam\") pod 
\"redhat-edpm-deployment-openstack-edpm-ipam-5lhpt\" (UID: \"095acb56-0517-4287-b7ef-6a000b255cb7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.169403 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/095acb56-0517-4287-b7ef-6a000b255cb7-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-5lhpt\" (UID: \"095acb56-0517-4287-b7ef-6a000b255cb7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.170514 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/095acb56-0517-4287-b7ef-6a000b255cb7-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-5lhpt\" (UID: \"095acb56-0517-4287-b7ef-6a000b255cb7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.180029 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mqmn\" (UniqueName: \"kubernetes.io/projected/095acb56-0517-4287-b7ef-6a000b255cb7-kube-api-access-2mqmn\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-5lhpt\" (UID: \"095acb56-0517-4287-b7ef-6a000b255cb7\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.242634 4814 scope.go:117] "RemoveContainer" containerID="db183127b8c5203be8cd0eb630502e22866d8a01d857e36bccf6c1af69f3b78f" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.271024 4814 scope.go:117] "RemoveContainer" containerID="6bc0e39c263a87bf5acd852a15127c3e30522ce1b8d70bc9c8199022bad57eaf" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.272881 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.316228 4814 scope.go:117] "RemoveContainer" containerID="536da12983e15963b7518a37321f5ff40c64e2172e1bcc7103071af96b9535bc" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.416163 4814 scope.go:117] "RemoveContainer" containerID="ba70de8318a9434d35facf4c16d1dbd28bb5e77cdb81af5035192ac0f65f5894" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.463414 4814 scope.go:117] "RemoveContainer" containerID="31c4ea1a6e886e542e501677a943e1b7a8b0732519bc9e9a6940f40a45a4d197" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.503668 4814 scope.go:117] "RemoveContainer" containerID="97f257806b7f3681374dbe001c01a6167cab18ac3f035513fa06dd0adb750034" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.543960 4814 scope.go:117] "RemoveContainer" containerID="78e6f2bcae629b7dc179ad9cc64009fba1bf19de277d94dd338394c5884102c6" Jan 22 05:41:40 crc kubenswrapper[4814]: I0122 05:41:40.849306 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt"] Jan 22 05:41:41 crc kubenswrapper[4814]: I0122 05:41:41.774122 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" event={"ID":"095acb56-0517-4287-b7ef-6a000b255cb7","Type":"ContainerStarted","Data":"a05fea9ae7e4289356e8035714651aca2d8bf587af77bdedbd3c9cdcf098fe63"} Jan 22 05:41:41 crc kubenswrapper[4814]: I0122 05:41:41.774477 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" event={"ID":"095acb56-0517-4287-b7ef-6a000b255cb7","Type":"ContainerStarted","Data":"f4cc018a7d3d057c20ca75880fc1f0c0bd307000c8397949d9b45dcf63da3af9"} Jan 22 05:41:41 crc kubenswrapper[4814]: I0122 05:41:41.802352 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" podStartSLOduration=2.403162448 podStartE2EDuration="2.802330334s" podCreationTimestamp="2026-01-22 05:41:39 +0000 UTC" firstStartedPulling="2026-01-22 05:41:40.856418661 +0000 UTC m=+1386.939906876" lastFinishedPulling="2026-01-22 05:41:41.255586547 +0000 UTC m=+1387.339074762" observedRunningTime="2026-01-22 05:41:41.794081968 +0000 UTC m=+1387.877570183" watchObservedRunningTime="2026-01-22 05:41:41.802330334 +0000 UTC m=+1387.885818549" Jan 22 05:41:44 crc kubenswrapper[4814]: I0122 05:41:44.808099 4814 generic.go:334] "Generic (PLEG): container finished" podID="095acb56-0517-4287-b7ef-6a000b255cb7" containerID="a05fea9ae7e4289356e8035714651aca2d8bf587af77bdedbd3c9cdcf098fe63" exitCode=0 Jan 22 05:41:44 crc kubenswrapper[4814]: I0122 05:41:44.808258 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" event={"ID":"095acb56-0517-4287-b7ef-6a000b255cb7","Type":"ContainerDied","Data":"a05fea9ae7e4289356e8035714651aca2d8bf587af77bdedbd3c9cdcf098fe63"} Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.296210 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.387771 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/095acb56-0517-4287-b7ef-6a000b255cb7-ssh-key-openstack-edpm-ipam\") pod \"095acb56-0517-4287-b7ef-6a000b255cb7\" (UID: \"095acb56-0517-4287-b7ef-6a000b255cb7\") " Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.387954 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2mqmn\" (UniqueName: \"kubernetes.io/projected/095acb56-0517-4287-b7ef-6a000b255cb7-kube-api-access-2mqmn\") pod \"095acb56-0517-4287-b7ef-6a000b255cb7\" (UID: \"095acb56-0517-4287-b7ef-6a000b255cb7\") " Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.387976 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/095acb56-0517-4287-b7ef-6a000b255cb7-inventory\") pod \"095acb56-0517-4287-b7ef-6a000b255cb7\" (UID: \"095acb56-0517-4287-b7ef-6a000b255cb7\") " Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.394132 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/095acb56-0517-4287-b7ef-6a000b255cb7-kube-api-access-2mqmn" (OuterVolumeSpecName: "kube-api-access-2mqmn") pod "095acb56-0517-4287-b7ef-6a000b255cb7" (UID: "095acb56-0517-4287-b7ef-6a000b255cb7"). InnerVolumeSpecName "kube-api-access-2mqmn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.423081 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/095acb56-0517-4287-b7ef-6a000b255cb7-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "095acb56-0517-4287-b7ef-6a000b255cb7" (UID: "095acb56-0517-4287-b7ef-6a000b255cb7"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.435411 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/095acb56-0517-4287-b7ef-6a000b255cb7-inventory" (OuterVolumeSpecName: "inventory") pod "095acb56-0517-4287-b7ef-6a000b255cb7" (UID: "095acb56-0517-4287-b7ef-6a000b255cb7"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.490460 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/095acb56-0517-4287-b7ef-6a000b255cb7-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.490525 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2mqmn\" (UniqueName: \"kubernetes.io/projected/095acb56-0517-4287-b7ef-6a000b255cb7-kube-api-access-2mqmn\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.490584 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/095acb56-0517-4287-b7ef-6a000b255cb7-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.835265 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" event={"ID":"095acb56-0517-4287-b7ef-6a000b255cb7","Type":"ContainerDied","Data":"f4cc018a7d3d057c20ca75880fc1f0c0bd307000c8397949d9b45dcf63da3af9"} Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.835310 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4cc018a7d3d057c20ca75880fc1f0c0bd307000c8397949d9b45dcf63da3af9" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.835681 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-5lhpt" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.975048 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk"] Jan 22 05:41:46 crc kubenswrapper[4814]: E0122 05:41:46.975697 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095acb56-0517-4287-b7ef-6a000b255cb7" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.975726 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="095acb56-0517-4287-b7ef-6a000b255cb7" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.976131 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="095acb56-0517-4287-b7ef-6a000b255cb7" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.977122 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.981834 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.982317 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.982544 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.986064 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:41:46 crc kubenswrapper[4814]: I0122 05:41:46.988553 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk"] Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.103769 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bl57k\" (UniqueName: \"kubernetes.io/projected/7110037d-448b-4fb2-bc41-38848890a505-kube-api-access-bl57k\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.103949 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.104172 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.104308 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.206701 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.206754 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.206847 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bl57k\" (UniqueName: \"kubernetes.io/projected/7110037d-448b-4fb2-bc41-38848890a505-kube-api-access-bl57k\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.206893 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.215336 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.217999 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.226567 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.227591 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bl57k\" (UniqueName: \"kubernetes.io/projected/7110037d-448b-4fb2-bc41-38848890a505-kube-api-access-bl57k\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.298691 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.703054 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk"] Jan 22 05:41:47 crc kubenswrapper[4814]: W0122 05:41:47.718831 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7110037d_448b_4fb2_bc41_38848890a505.slice/crio-b66c9d222912f03a605eb1877c7735851bb03041ce6bce9ccd531f37f0960ca9 WatchSource:0}: Error finding container b66c9d222912f03a605eb1877c7735851bb03041ce6bce9ccd531f37f0960ca9: Status 404 returned error can't find the container with id b66c9d222912f03a605eb1877c7735851bb03041ce6bce9ccd531f37f0960ca9 Jan 22 05:41:47 crc kubenswrapper[4814]: I0122 05:41:47.845043 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" event={"ID":"7110037d-448b-4fb2-bc41-38848890a505","Type":"ContainerStarted","Data":"b66c9d222912f03a605eb1877c7735851bb03041ce6bce9ccd531f37f0960ca9"} Jan 22 05:41:48 crc kubenswrapper[4814]: I0122 05:41:48.853487 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" event={"ID":"7110037d-448b-4fb2-bc41-38848890a505","Type":"ContainerStarted","Data":"ceb660bec2e4b9b38ae4d43043573a22df9b16750ff0bd7762a7d1c049dd047e"} Jan 22 05:41:48 crc kubenswrapper[4814]: I0122 05:41:48.874181 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" podStartSLOduration=2.405025214 podStartE2EDuration="2.874161982s" podCreationTimestamp="2026-01-22 05:41:46 +0000 UTC" firstStartedPulling="2026-01-22 05:41:47.720648717 +0000 UTC m=+1393.804136922" lastFinishedPulling="2026-01-22 05:41:48.189785455 +0000 UTC m=+1394.273273690" observedRunningTime="2026-01-22 05:41:48.869033943 +0000 UTC m=+1394.952522168" watchObservedRunningTime="2026-01-22 05:41:48.874161982 +0000 UTC m=+1394.957650197" Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.065345 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nxsm8"] Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.067559 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nxsm8" Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.124052 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nxsm8"] Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.246257 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/736b9a97-c3fd-4884-9222-b4cecf131249-utilities\") pod \"redhat-operators-nxsm8\" (UID: \"736b9a97-c3fd-4884-9222-b4cecf131249\") " pod="openshift-marketplace/redhat-operators-nxsm8" Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.246350 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/736b9a97-c3fd-4884-9222-b4cecf131249-catalog-content\") pod \"redhat-operators-nxsm8\" (UID: \"736b9a97-c3fd-4884-9222-b4cecf131249\") " pod="openshift-marketplace/redhat-operators-nxsm8" Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.246373 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncvwn\" (UniqueName: \"kubernetes.io/projected/736b9a97-c3fd-4884-9222-b4cecf131249-kube-api-access-ncvwn\") pod \"redhat-operators-nxsm8\" (UID: \"736b9a97-c3fd-4884-9222-b4cecf131249\") " pod="openshift-marketplace/redhat-operators-nxsm8" Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.348208 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/736b9a97-c3fd-4884-9222-b4cecf131249-catalog-content\") pod \"redhat-operators-nxsm8\" (UID: \"736b9a97-c3fd-4884-9222-b4cecf131249\") " pod="openshift-marketplace/redhat-operators-nxsm8" Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.348558 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncvwn\" (UniqueName: \"kubernetes.io/projected/736b9a97-c3fd-4884-9222-b4cecf131249-kube-api-access-ncvwn\") pod \"redhat-operators-nxsm8\" (UID: \"736b9a97-c3fd-4884-9222-b4cecf131249\") " pod="openshift-marketplace/redhat-operators-nxsm8" Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.348700 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/736b9a97-c3fd-4884-9222-b4cecf131249-catalog-content\") pod \"redhat-operators-nxsm8\" (UID: \"736b9a97-c3fd-4884-9222-b4cecf131249\") " pod="openshift-marketplace/redhat-operators-nxsm8" Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.348847 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/736b9a97-c3fd-4884-9222-b4cecf131249-utilities\") pod \"redhat-operators-nxsm8\" (UID: \"736b9a97-c3fd-4884-9222-b4cecf131249\") " pod="openshift-marketplace/redhat-operators-nxsm8" Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.349286 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/736b9a97-c3fd-4884-9222-b4cecf131249-utilities\") pod \"redhat-operators-nxsm8\" (UID: \"736b9a97-c3fd-4884-9222-b4cecf131249\") " pod="openshift-marketplace/redhat-operators-nxsm8" Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.370671 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-ncvwn\" (UniqueName: \"kubernetes.io/projected/736b9a97-c3fd-4884-9222-b4cecf131249-kube-api-access-ncvwn\") pod \"redhat-operators-nxsm8\" (UID: \"736b9a97-c3fd-4884-9222-b4cecf131249\") " pod="openshift-marketplace/redhat-operators-nxsm8" Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.385506 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nxsm8" Jan 22 05:42:19 crc kubenswrapper[4814]: I0122 05:42:19.924192 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nxsm8"] Jan 22 05:42:20 crc kubenswrapper[4814]: I0122 05:42:20.189364 4814 generic.go:334] "Generic (PLEG): container finished" podID="736b9a97-c3fd-4884-9222-b4cecf131249" containerID="e1d9c07f2d2a07517d6da77e842a76ee8a964b8144774fb141601c89c21a9924" exitCode=0 Jan 22 05:42:20 crc kubenswrapper[4814]: I0122 05:42:20.189531 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nxsm8" event={"ID":"736b9a97-c3fd-4884-9222-b4cecf131249","Type":"ContainerDied","Data":"e1d9c07f2d2a07517d6da77e842a76ee8a964b8144774fb141601c89c21a9924"} Jan 22 05:42:20 crc kubenswrapper[4814]: I0122 05:42:20.189698 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nxsm8" event={"ID":"736b9a97-c3fd-4884-9222-b4cecf131249","Type":"ContainerStarted","Data":"729523b83db0f8ff649af07a456c31b5c186f85c1ae6e321c445d647bd8548ab"} Jan 22 05:42:22 crc kubenswrapper[4814]: I0122 05:42:22.217211 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nxsm8" event={"ID":"736b9a97-c3fd-4884-9222-b4cecf131249","Type":"ContainerStarted","Data":"8d56aa423c9452d7e9c8fa5d4aa6febfe462633b694ddbbf6fa51912a1ddf882"} Jan 22 05:42:25 crc kubenswrapper[4814]: I0122 05:42:25.250830 4814 generic.go:334] "Generic (PLEG): container finished" podID="736b9a97-c3fd-4884-9222-b4cecf131249" containerID="8d56aa423c9452d7e9c8fa5d4aa6febfe462633b694ddbbf6fa51912a1ddf882" exitCode=0 Jan 22 05:42:25 crc kubenswrapper[4814]: I0122 05:42:25.250949 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nxsm8" event={"ID":"736b9a97-c3fd-4884-9222-b4cecf131249","Type":"ContainerDied","Data":"8d56aa423c9452d7e9c8fa5d4aa6febfe462633b694ddbbf6fa51912a1ddf882"} Jan 22 05:42:26 crc kubenswrapper[4814]: I0122 05:42:26.271893 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nxsm8" event={"ID":"736b9a97-c3fd-4884-9222-b4cecf131249","Type":"ContainerStarted","Data":"ffcec7bacc1c0927de08385dd5986b37d7e423763ffe5da084dcbebf3b7b0a86"} Jan 22 05:42:26 crc kubenswrapper[4814]: I0122 05:42:26.311370 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nxsm8" podStartSLOduration=1.794904298 podStartE2EDuration="7.311354991s" podCreationTimestamp="2026-01-22 05:42:19 +0000 UTC" firstStartedPulling="2026-01-22 05:42:20.19111308 +0000 UTC m=+1426.274601295" lastFinishedPulling="2026-01-22 05:42:25.707563743 +0000 UTC m=+1431.791051988" observedRunningTime="2026-01-22 05:42:26.292793974 +0000 UTC m=+1432.376282210" watchObservedRunningTime="2026-01-22 05:42:26.311354991 +0000 UTC m=+1432.394843206" Jan 22 05:42:29 crc kubenswrapper[4814]: I0122 05:42:29.386661 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nxsm8" Jan 
Jan 22 05:42:29 crc kubenswrapper[4814]: I0122 05:42:29.386661 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nxsm8"
Jan 22 05:42:29 crc kubenswrapper[4814]: I0122 05:42:29.386872 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nxsm8"
Jan 22 05:42:30 crc kubenswrapper[4814]: I0122 05:42:30.445865 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nxsm8" podUID="736b9a97-c3fd-4884-9222-b4cecf131249" containerName="registry-server" probeResult="failure" output=<
Jan 22 05:42:30 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s
Jan 22 05:42:30 crc kubenswrapper[4814]: >
Jan 22 05:42:39 crc kubenswrapper[4814]: I0122 05:42:39.440428 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nxsm8"
Jan 22 05:42:39 crc kubenswrapper[4814]: I0122 05:42:39.508912 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nxsm8"
Jan 22 05:42:39 crc kubenswrapper[4814]: I0122 05:42:39.677483 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nxsm8"]
Jan 22 05:42:40 crc kubenswrapper[4814]: I0122 05:42:40.682314 4814 scope.go:117] "RemoveContainer" containerID="bc1a6f38726e7ad1797308a8a261f1f3b61931df828b6b03371101094e4c74e2"
Jan 22 05:42:40 crc kubenswrapper[4814]: I0122 05:42:40.716650 4814 scope.go:117] "RemoveContainer" containerID="7508e78fc668fe22363adf5d95c259d6251ffb9356ef19b19c86116d46a0a671"
Jan 22 05:42:40 crc kubenswrapper[4814]: I0122 05:42:40.789859 4814 scope.go:117] "RemoveContainer" containerID="4c739bed32d8de7c967b4984ad01401552654bcc8e3c75ddb35c78a22b2836f9"
Jan 22 05:42:41 crc kubenswrapper[4814]: I0122 05:42:41.434269 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nxsm8" podUID="736b9a97-c3fd-4884-9222-b4cecf131249" containerName="registry-server" containerID="cri-o://ffcec7bacc1c0927de08385dd5986b37d7e423763ffe5da084dcbebf3b7b0a86" gracePeriod=2
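
Note on the probe output above: 'timeout: failed to connect service ":50051" within 1s' is characteristic of an exec'd gRPC health check against the registry-server port before it starts listening; the startup probe fails at 05:42:30 and passes by 05:42:39. A hedged Go sketch of such a probe; the exact command and thresholds are assumptions, not the real manifest:

    package main

    import corev1 "k8s.io/api/core/v1"

    // registryStartupProbe sketches a startup probe consistent with the failure
    // output above: a gRPC health check against :50051 with a 1s timeout.
    func registryStartupProbe() *corev1.Probe {
    	return &corev1.Probe{
    		ProbeHandler: corev1.ProbeHandler{
    			Exec: &corev1.ExecAction{
    				Command: []string{"grpc_health_probe", "-addr=:50051"},
    			},
    		},
    		TimeoutSeconds:   1,  // matches "within 1s" in the probe output
    		PeriodSeconds:    10, // assumed; fail at :30, success by :39 fits a ~10s period
    		FailureThreshold: 3,  // assumed
    	}
    }
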
Need to start a new one" pod="openshift-marketplace/redhat-operators-nxsm8" Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.054059 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncvwn\" (UniqueName: \"kubernetes.io/projected/736b9a97-c3fd-4884-9222-b4cecf131249-kube-api-access-ncvwn\") pod \"736b9a97-c3fd-4884-9222-b4cecf131249\" (UID: \"736b9a97-c3fd-4884-9222-b4cecf131249\") " Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.054139 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/736b9a97-c3fd-4884-9222-b4cecf131249-utilities\") pod \"736b9a97-c3fd-4884-9222-b4cecf131249\" (UID: \"736b9a97-c3fd-4884-9222-b4cecf131249\") " Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.054371 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/736b9a97-c3fd-4884-9222-b4cecf131249-catalog-content\") pod \"736b9a97-c3fd-4884-9222-b4cecf131249\" (UID: \"736b9a97-c3fd-4884-9222-b4cecf131249\") " Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.056585 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/736b9a97-c3fd-4884-9222-b4cecf131249-utilities" (OuterVolumeSpecName: "utilities") pod "736b9a97-c3fd-4884-9222-b4cecf131249" (UID: "736b9a97-c3fd-4884-9222-b4cecf131249"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.060930 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/736b9a97-c3fd-4884-9222-b4cecf131249-kube-api-access-ncvwn" (OuterVolumeSpecName: "kube-api-access-ncvwn") pod "736b9a97-c3fd-4884-9222-b4cecf131249" (UID: "736b9a97-c3fd-4884-9222-b4cecf131249"). InnerVolumeSpecName "kube-api-access-ncvwn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.156838 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncvwn\" (UniqueName: \"kubernetes.io/projected/736b9a97-c3fd-4884-9222-b4cecf131249-kube-api-access-ncvwn\") on node \"crc\" DevicePath \"\"" Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.156885 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/736b9a97-c3fd-4884-9222-b4cecf131249-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.187282 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/736b9a97-c3fd-4884-9222-b4cecf131249-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "736b9a97-c3fd-4884-9222-b4cecf131249" (UID: "736b9a97-c3fd-4884-9222-b4cecf131249"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.258940 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/736b9a97-c3fd-4884-9222-b4cecf131249-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.450275 4814 generic.go:334] "Generic (PLEG): container finished" podID="736b9a97-c3fd-4884-9222-b4cecf131249" containerID="ffcec7bacc1c0927de08385dd5986b37d7e423763ffe5da084dcbebf3b7b0a86" exitCode=0 Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.450323 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nxsm8" event={"ID":"736b9a97-c3fd-4884-9222-b4cecf131249","Type":"ContainerDied","Data":"ffcec7bacc1c0927de08385dd5986b37d7e423763ffe5da084dcbebf3b7b0a86"} Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.450678 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nxsm8" event={"ID":"736b9a97-c3fd-4884-9222-b4cecf131249","Type":"ContainerDied","Data":"729523b83db0f8ff649af07a456c31b5c186f85c1ae6e321c445d647bd8548ab"} Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.450389 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nxsm8" Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.450762 4814 scope.go:117] "RemoveContainer" containerID="ffcec7bacc1c0927de08385dd5986b37d7e423763ffe5da084dcbebf3b7b0a86" Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.490576 4814 scope.go:117] "RemoveContainer" containerID="8d56aa423c9452d7e9c8fa5d4aa6febfe462633b694ddbbf6fa51912a1ddf882" Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.499619 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nxsm8"] Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.511426 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nxsm8"] Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.517993 4814 scope.go:117] "RemoveContainer" containerID="e1d9c07f2d2a07517d6da77e842a76ee8a964b8144774fb141601c89c21a9924" Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.632574 4814 scope.go:117] "RemoveContainer" containerID="ffcec7bacc1c0927de08385dd5986b37d7e423763ffe5da084dcbebf3b7b0a86" Jan 22 05:42:42 crc kubenswrapper[4814]: E0122 05:42:42.634984 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffcec7bacc1c0927de08385dd5986b37d7e423763ffe5da084dcbebf3b7b0a86\": container with ID starting with ffcec7bacc1c0927de08385dd5986b37d7e423763ffe5da084dcbebf3b7b0a86 not found: ID does not exist" containerID="ffcec7bacc1c0927de08385dd5986b37d7e423763ffe5da084dcbebf3b7b0a86" Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.635053 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffcec7bacc1c0927de08385dd5986b37d7e423763ffe5da084dcbebf3b7b0a86"} err="failed to get container status \"ffcec7bacc1c0927de08385dd5986b37d7e423763ffe5da084dcbebf3b7b0a86\": rpc error: code = NotFound desc = could not find container \"ffcec7bacc1c0927de08385dd5986b37d7e423763ffe5da084dcbebf3b7b0a86\": container with ID starting with ffcec7bacc1c0927de08385dd5986b37d7e423763ffe5da084dcbebf3b7b0a86 not found: ID does not exist" Jan 22 05:42:42 crc 
Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.635093 4814 scope.go:117] "RemoveContainer" containerID="8d56aa423c9452d7e9c8fa5d4aa6febfe462633b694ddbbf6fa51912a1ddf882"
Jan 22 05:42:42 crc kubenswrapper[4814]: E0122 05:42:42.635786 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d56aa423c9452d7e9c8fa5d4aa6febfe462633b694ddbbf6fa51912a1ddf882\": container with ID starting with 8d56aa423c9452d7e9c8fa5d4aa6febfe462633b694ddbbf6fa51912a1ddf882 not found: ID does not exist" containerID="8d56aa423c9452d7e9c8fa5d4aa6febfe462633b694ddbbf6fa51912a1ddf882"
Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.635896 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d56aa423c9452d7e9c8fa5d4aa6febfe462633b694ddbbf6fa51912a1ddf882"} err="failed to get container status \"8d56aa423c9452d7e9c8fa5d4aa6febfe462633b694ddbbf6fa51912a1ddf882\": rpc error: code = NotFound desc = could not find container \"8d56aa423c9452d7e9c8fa5d4aa6febfe462633b694ddbbf6fa51912a1ddf882\": container with ID starting with 8d56aa423c9452d7e9c8fa5d4aa6febfe462633b694ddbbf6fa51912a1ddf882 not found: ID does not exist"
Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.636015 4814 scope.go:117] "RemoveContainer" containerID="e1d9c07f2d2a07517d6da77e842a76ee8a964b8144774fb141601c89c21a9924"
Jan 22 05:42:42 crc kubenswrapper[4814]: E0122 05:42:42.636547 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1d9c07f2d2a07517d6da77e842a76ee8a964b8144774fb141601c89c21a9924\": container with ID starting with e1d9c07f2d2a07517d6da77e842a76ee8a964b8144774fb141601c89c21a9924 not found: ID does not exist" containerID="e1d9c07f2d2a07517d6da77e842a76ee8a964b8144774fb141601c89c21a9924"
Jan 22 05:42:42 crc kubenswrapper[4814]: I0122 05:42:42.636598 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1d9c07f2d2a07517d6da77e842a76ee8a964b8144774fb141601c89c21a9924"} err="failed to get container status \"e1d9c07f2d2a07517d6da77e842a76ee8a964b8144774fb141601c89c21a9924\": rpc error: code = NotFound desc = could not find container \"e1d9c07f2d2a07517d6da77e842a76ee8a964b8144774fb141601c89c21a9924\": container with ID starting with e1d9c07f2d2a07517d6da77e842a76ee8a964b8144774fb141601c89c21a9924 not found: ID does not exist"
Jan 22 05:42:44 crc kubenswrapper[4814]: I0122 05:42:44.362728 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="736b9a97-c3fd-4884-9222-b4cecf131249" path="/var/lib/kubelet/pods/736b9a97-c3fd-4884-9222-b4cecf131249/volumes"
Jan 22 05:43:40 crc kubenswrapper[4814]: I0122 05:43:40.887990 4814 scope.go:117] "RemoveContainer" containerID="7227166f51a6ea452cf3e6ed6b122ec93a84b70b70b63faf04397ddc39650cad"
Jan 22 05:43:40 crc kubenswrapper[4814]: I0122 05:43:40.913403 4814 scope.go:117] "RemoveContainer" containerID="b8bcc01cfe1c622a59e9044973a57b66974a5ee04cc631bf5ec60dd023dcce96"
Jan 22 05:43:40 crc kubenswrapper[4814]: I0122 05:43:40.943785 4814 scope.go:117] "RemoveContainer" containerID="7eb9e732f24af4380b07ac7b0338602687d4c83dee6e7ba24957279d1463bd6e"
Jan 22 05:43:40 crc kubenswrapper[4814]: I0122 05:43:40.971353 4814 scope.go:117] "RemoveContainer" containerID="4651deb0cb00d00ee5b3ad4c6288d97849f4e83793a9a8e16ab8ddb2b903f0d0"
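
Note on the RemoveContainer / "ContainerStatus from runtime service failed ... NotFound" pairs above: this is a benign race. The container was already removed by an earlier deletion path, so the status lookup during a second removal attempt returns NotFound and the kubelet logs it and moves on. Any CRI client doing cleanup wants the same idempotent treatment; a hedged sketch, not kubelet code:

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // removeContainer treats NotFound as success: an already-removed container
    // is the desired end state, exactly as in the log entries above.
    func removeContainer(remove func(id string) error, id string) error {
    	err := remove(id)
    	if err == nil {
    		return nil
    	}
    	if status.Code(err) == codes.NotFound {
    		fmt.Printf("container %s already removed, ignoring\n", id)
    		return nil
    	}
    	return fmt.Errorf("removing container %s: %w", id, err)
    }
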
Jan 22 05:43:49 crc kubenswrapper[4814]: I0122 05:43:49.614099 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 05:43:49 crc kubenswrapper[4814]: I0122 05:43:49.614844 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.069748 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6wsvs"]
Jan 22 05:44:11 crc kubenswrapper[4814]: E0122 05:44:11.070809 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="736b9a97-c3fd-4884-9222-b4cecf131249" containerName="registry-server"
Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.070831 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="736b9a97-c3fd-4884-9222-b4cecf131249" containerName="registry-server"
Jan 22 05:44:11 crc kubenswrapper[4814]: E0122 05:44:11.070881 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="736b9a97-c3fd-4884-9222-b4cecf131249" containerName="extract-utilities"
Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.070904 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="736b9a97-c3fd-4884-9222-b4cecf131249" containerName="extract-utilities"
Jan 22 05:44:11 crc kubenswrapper[4814]: E0122 05:44:11.070931 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="736b9a97-c3fd-4884-9222-b4cecf131249" containerName="extract-content"
Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.070942 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="736b9a97-c3fd-4884-9222-b4cecf131249" containerName="extract-content"
Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.071334 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="736b9a97-c3fd-4884-9222-b4cecf131249" containerName="registry-server"
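
Note on the cpu_manager/memory_manager RemoveStaleState lines above: when the next pod is admitted, the resource managers prune state entries that still reference containers of the already-deleted pod 736b9a97-... The core idea is a sweep of a (podUID, container)-keyed state map against the set of pods the kubelet still tracks. A hedged, simplified sketch (not the kubelet's actual state types):

    package main

    // pruneStaleAssignments mirrors the RemoveStaleState idea: drop per-container
    // resource assignments whose pod no longer exists. Simplified illustration;
    // the real cpu/memory managers also return reclaimed resources to the shared
    // pools and persist a state checkpoint.
    func pruneStaleAssignments(
    	assignments map[string]map[string]string, // podUID -> container name -> assigned cpuset
    	activePods map[string]bool,               // pods the kubelet still tracks
    ) {
    	for podUID := range assignments {
    		if !activePods[podUID] {
    			delete(assignments, podUID) // logged as "Deleted CPUSet assignment" per container
    		}
    	}
    }
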
Need to start a new one" pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.099846 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6wsvs"] Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.240598 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-catalog-content\") pod \"community-operators-6wsvs\" (UID: \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\") " pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.241783 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dw86r\" (UniqueName: \"kubernetes.io/projected/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-kube-api-access-dw86r\") pod \"community-operators-6wsvs\" (UID: \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\") " pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.242008 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-utilities\") pod \"community-operators-6wsvs\" (UID: \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\") " pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.344056 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dw86r\" (UniqueName: \"kubernetes.io/projected/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-kube-api-access-dw86r\") pod \"community-operators-6wsvs\" (UID: \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\") " pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.344320 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-utilities\") pod \"community-operators-6wsvs\" (UID: \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\") " pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.344364 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-catalog-content\") pod \"community-operators-6wsvs\" (UID: \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\") " pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.345001 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-utilities\") pod \"community-operators-6wsvs\" (UID: \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\") " pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.345659 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-catalog-content\") pod \"community-operators-6wsvs\" (UID: \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\") " pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.368681 4814 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-dw86r\" (UniqueName: \"kubernetes.io/projected/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-kube-api-access-dw86r\") pod \"community-operators-6wsvs\" (UID: \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\") " pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.418891 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:11 crc kubenswrapper[4814]: I0122 05:44:11.938190 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6wsvs"] Jan 22 05:44:12 crc kubenswrapper[4814]: I0122 05:44:12.428038 4814 generic.go:334] "Generic (PLEG): container finished" podID="28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" containerID="80001a8451dc4d6e4e4abe46753c63776b049be26b599ac39f0239a6fb5c77fd" exitCode=0 Jan 22 05:44:12 crc kubenswrapper[4814]: I0122 05:44:12.428291 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wsvs" event={"ID":"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8","Type":"ContainerDied","Data":"80001a8451dc4d6e4e4abe46753c63776b049be26b599ac39f0239a6fb5c77fd"} Jan 22 05:44:12 crc kubenswrapper[4814]: I0122 05:44:12.428418 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wsvs" event={"ID":"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8","Type":"ContainerStarted","Data":"a2b866206deb38198e3a8c21f738553b31ab4a4dc0aca5aa2bfc7a2b83add5a6"} Jan 22 05:44:12 crc kubenswrapper[4814]: I0122 05:44:12.432927 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 05:44:13 crc kubenswrapper[4814]: I0122 05:44:13.448004 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wsvs" event={"ID":"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8","Type":"ContainerStarted","Data":"3546c980cf4813b9c3959119112462b1ca84189c05eaf44008fa6e2dccece401"} Jan 22 05:44:13 crc kubenswrapper[4814]: I0122 05:44:13.835778 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-727qb"] Jan 22 05:44:13 crc kubenswrapper[4814]: I0122 05:44:13.839039 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:13 crc kubenswrapper[4814]: I0122 05:44:13.862068 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-727qb"] Jan 22 05:44:14 crc kubenswrapper[4814]: I0122 05:44:14.001267 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fcb452e-d5b1-4d61-b043-554ab64b082c-catalog-content\") pod \"certified-operators-727qb\" (UID: \"8fcb452e-d5b1-4d61-b043-554ab64b082c\") " pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:14 crc kubenswrapper[4814]: I0122 05:44:14.001359 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vm2wz\" (UniqueName: \"kubernetes.io/projected/8fcb452e-d5b1-4d61-b043-554ab64b082c-kube-api-access-vm2wz\") pod \"certified-operators-727qb\" (UID: \"8fcb452e-d5b1-4d61-b043-554ab64b082c\") " pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:14 crc kubenswrapper[4814]: I0122 05:44:14.001391 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fcb452e-d5b1-4d61-b043-554ab64b082c-utilities\") pod \"certified-operators-727qb\" (UID: \"8fcb452e-d5b1-4d61-b043-554ab64b082c\") " pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:14 crc kubenswrapper[4814]: I0122 05:44:14.103295 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fcb452e-d5b1-4d61-b043-554ab64b082c-catalog-content\") pod \"certified-operators-727qb\" (UID: \"8fcb452e-d5b1-4d61-b043-554ab64b082c\") " pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:14 crc kubenswrapper[4814]: I0122 05:44:14.103389 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vm2wz\" (UniqueName: \"kubernetes.io/projected/8fcb452e-d5b1-4d61-b043-554ab64b082c-kube-api-access-vm2wz\") pod \"certified-operators-727qb\" (UID: \"8fcb452e-d5b1-4d61-b043-554ab64b082c\") " pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:14 crc kubenswrapper[4814]: I0122 05:44:14.103419 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fcb452e-d5b1-4d61-b043-554ab64b082c-utilities\") pod \"certified-operators-727qb\" (UID: \"8fcb452e-d5b1-4d61-b043-554ab64b082c\") " pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:14 crc kubenswrapper[4814]: I0122 05:44:14.103901 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fcb452e-d5b1-4d61-b043-554ab64b082c-utilities\") pod \"certified-operators-727qb\" (UID: \"8fcb452e-d5b1-4d61-b043-554ab64b082c\") " pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:14 crc kubenswrapper[4814]: I0122 05:44:14.104126 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fcb452e-d5b1-4d61-b043-554ab64b082c-catalog-content\") pod \"certified-operators-727qb\" (UID: \"8fcb452e-d5b1-4d61-b043-554ab64b082c\") " pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:14 crc kubenswrapper[4814]: I0122 05:44:14.126974 4814 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-vm2wz\" (UniqueName: \"kubernetes.io/projected/8fcb452e-d5b1-4d61-b043-554ab64b082c-kube-api-access-vm2wz\") pod \"certified-operators-727qb\" (UID: \"8fcb452e-d5b1-4d61-b043-554ab64b082c\") " pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:14 crc kubenswrapper[4814]: I0122 05:44:14.208000 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:14 crc kubenswrapper[4814]: I0122 05:44:14.482811 4814 generic.go:334] "Generic (PLEG): container finished" podID="28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" containerID="3546c980cf4813b9c3959119112462b1ca84189c05eaf44008fa6e2dccece401" exitCode=0 Jan 22 05:44:14 crc kubenswrapper[4814]: I0122 05:44:14.483038 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wsvs" event={"ID":"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8","Type":"ContainerDied","Data":"3546c980cf4813b9c3959119112462b1ca84189c05eaf44008fa6e2dccece401"} Jan 22 05:44:14 crc kubenswrapper[4814]: I0122 05:44:14.738274 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-727qb"] Jan 22 05:44:15 crc kubenswrapper[4814]: I0122 05:44:15.496198 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wsvs" event={"ID":"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8","Type":"ContainerStarted","Data":"e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad"} Jan 22 05:44:15 crc kubenswrapper[4814]: I0122 05:44:15.504838 4814 generic.go:334] "Generic (PLEG): container finished" podID="8fcb452e-d5b1-4d61-b043-554ab64b082c" containerID="f25cc7d9dde32c3afb76baee49213cde5497eb1690f624647c2f50404b1dc6f5" exitCode=0 Jan 22 05:44:15 crc kubenswrapper[4814]: I0122 05:44:15.504912 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-727qb" event={"ID":"8fcb452e-d5b1-4d61-b043-554ab64b082c","Type":"ContainerDied","Data":"f25cc7d9dde32c3afb76baee49213cde5497eb1690f624647c2f50404b1dc6f5"} Jan 22 05:44:15 crc kubenswrapper[4814]: I0122 05:44:15.504938 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-727qb" event={"ID":"8fcb452e-d5b1-4d61-b043-554ab64b082c","Type":"ContainerStarted","Data":"a6d463993a016c4e06702e74820d4e0b36a1bdad1776c01d5861c242fe924130"} Jan 22 05:44:15 crc kubenswrapper[4814]: I0122 05:44:15.540712 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6wsvs" podStartSLOduration=2.059818805 podStartE2EDuration="4.540692983s" podCreationTimestamp="2026-01-22 05:44:11 +0000 UTC" firstStartedPulling="2026-01-22 05:44:12.430762021 +0000 UTC m=+1538.514250266" lastFinishedPulling="2026-01-22 05:44:14.911636229 +0000 UTC m=+1540.995124444" observedRunningTime="2026-01-22 05:44:15.521075793 +0000 UTC m=+1541.604564008" watchObservedRunningTime="2026-01-22 05:44:15.540692983 +0000 UTC m=+1541.624181198" Jan 22 05:44:16 crc kubenswrapper[4814]: I0122 05:44:16.515284 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-727qb" event={"ID":"8fcb452e-d5b1-4d61-b043-554ab64b082c","Type":"ContainerStarted","Data":"d26caf40016b6f6ba6b1895e292bdbf1b96c4ec38ce6a8d1f083b6b640e9afea"} Jan 22 05:44:18 crc kubenswrapper[4814]: I0122 05:44:18.536105 4814 generic.go:334] "Generic (PLEG): container finished" 
podID="8fcb452e-d5b1-4d61-b043-554ab64b082c" containerID="d26caf40016b6f6ba6b1895e292bdbf1b96c4ec38ce6a8d1f083b6b640e9afea" exitCode=0 Jan 22 05:44:18 crc kubenswrapper[4814]: I0122 05:44:18.536314 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-727qb" event={"ID":"8fcb452e-d5b1-4d61-b043-554ab64b082c","Type":"ContainerDied","Data":"d26caf40016b6f6ba6b1895e292bdbf1b96c4ec38ce6a8d1f083b6b640e9afea"} Jan 22 05:44:19 crc kubenswrapper[4814]: I0122 05:44:19.550034 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-727qb" event={"ID":"8fcb452e-d5b1-4d61-b043-554ab64b082c","Type":"ContainerStarted","Data":"994e0070b199affb4d939d9fa8b00eca6a434d30f1976551c2d9ccc325a980f0"} Jan 22 05:44:19 crc kubenswrapper[4814]: I0122 05:44:19.576574 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-727qb" podStartSLOduration=2.993588773 podStartE2EDuration="6.576558852s" podCreationTimestamp="2026-01-22 05:44:13 +0000 UTC" firstStartedPulling="2026-01-22 05:44:15.507510962 +0000 UTC m=+1541.590999177" lastFinishedPulling="2026-01-22 05:44:19.090481011 +0000 UTC m=+1545.173969256" observedRunningTime="2026-01-22 05:44:19.573316682 +0000 UTC m=+1545.656804897" watchObservedRunningTime="2026-01-22 05:44:19.576558852 +0000 UTC m=+1545.660047067" Jan 22 05:44:19 crc kubenswrapper[4814]: I0122 05:44:19.613986 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:44:19 crc kubenswrapper[4814]: I0122 05:44:19.614061 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:44:21 crc kubenswrapper[4814]: I0122 05:44:21.419448 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:21 crc kubenswrapper[4814]: I0122 05:44:21.419794 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:21 crc kubenswrapper[4814]: I0122 05:44:21.465834 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:21 crc kubenswrapper[4814]: I0122 05:44:21.635711 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:22 crc kubenswrapper[4814]: I0122 05:44:22.633384 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6wsvs"] Jan 22 05:44:23 crc kubenswrapper[4814]: I0122 05:44:23.585418 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6wsvs" podUID="28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" containerName="registry-server" containerID="cri-o://e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad" gracePeriod=2 Jan 22 05:44:23 crc kubenswrapper[4814]: E0122 05:44:23.718506 4814 
Jan 22 05:44:23 crc kubenswrapper[4814]: E0122 05:44:23.718506 4814 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod28b2db45_ae9f_4fb7_89bb_e7225bfc33d8.slice/crio-e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad.scope\": RecentStats: unable to find data in memory cache]"
Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.071115 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6wsvs"
Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.208438 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-727qb"
Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.208503 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-727qb"
Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.267358 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dw86r\" (UniqueName: \"kubernetes.io/projected/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-kube-api-access-dw86r\") pod \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\" (UID: \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\") "
Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.267450 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-catalog-content\") pod \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\" (UID: \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\") "
Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.267643 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-utilities\") pod \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\" (UID: \"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8\") "
Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.268504 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-utilities" (OuterVolumeSpecName: "utilities") pod "28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" (UID: "28b2db45-ae9f-4fb7-89bb-e7225bfc33d8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.280879 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-kube-api-access-dw86r" (OuterVolumeSpecName: "kube-api-access-dw86r") pod "28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" (UID: "28b2db45-ae9f-4fb7-89bb-e7225bfc33d8"). InnerVolumeSpecName "kube-api-access-dw86r". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.335794 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.369271 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dw86r\" (UniqueName: \"kubernetes.io/projected/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-kube-api-access-dw86r\") on node \"crc\" DevicePath \"\"" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.369301 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.369311 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.604258 4814 generic.go:334] "Generic (PLEG): container finished" podID="28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" containerID="e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad" exitCode=0 Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.604361 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6wsvs" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.604415 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wsvs" event={"ID":"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8","Type":"ContainerDied","Data":"e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad"} Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.604455 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6wsvs" event={"ID":"28b2db45-ae9f-4fb7-89bb-e7225bfc33d8","Type":"ContainerDied","Data":"a2b866206deb38198e3a8c21f738553b31ab4a4dc0aca5aa2bfc7a2b83add5a6"} Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.604476 4814 scope.go:117] "RemoveContainer" containerID="e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.634947 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6wsvs"] Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.639021 4814 scope.go:117] "RemoveContainer" containerID="3546c980cf4813b9c3959119112462b1ca84189c05eaf44008fa6e2dccece401" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.642808 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6wsvs"] Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.663852 4814 scope.go:117] "RemoveContainer" containerID="80001a8451dc4d6e4e4abe46753c63776b049be26b599ac39f0239a6fb5c77fd" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.683301 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.712201 4814 scope.go:117] "RemoveContainer" containerID="e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad" Jan 22 05:44:24 crc kubenswrapper[4814]: E0122 05:44:24.712595 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = 
NotFound desc = could not find container \"e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad\": container with ID starting with e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad not found: ID does not exist" containerID="e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.712654 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad"} err="failed to get container status \"e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad\": rpc error: code = NotFound desc = could not find container \"e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad\": container with ID starting with e263a2973a778b6d3add1e989ff112facd9127367767dbef17e64b3f244162ad not found: ID does not exist" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.712697 4814 scope.go:117] "RemoveContainer" containerID="3546c980cf4813b9c3959119112462b1ca84189c05eaf44008fa6e2dccece401" Jan 22 05:44:24 crc kubenswrapper[4814]: E0122 05:44:24.713180 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3546c980cf4813b9c3959119112462b1ca84189c05eaf44008fa6e2dccece401\": container with ID starting with 3546c980cf4813b9c3959119112462b1ca84189c05eaf44008fa6e2dccece401 not found: ID does not exist" containerID="3546c980cf4813b9c3959119112462b1ca84189c05eaf44008fa6e2dccece401" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.713206 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3546c980cf4813b9c3959119112462b1ca84189c05eaf44008fa6e2dccece401"} err="failed to get container status \"3546c980cf4813b9c3959119112462b1ca84189c05eaf44008fa6e2dccece401\": rpc error: code = NotFound desc = could not find container \"3546c980cf4813b9c3959119112462b1ca84189c05eaf44008fa6e2dccece401\": container with ID starting with 3546c980cf4813b9c3959119112462b1ca84189c05eaf44008fa6e2dccece401 not found: ID does not exist" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.713219 4814 scope.go:117] "RemoveContainer" containerID="80001a8451dc4d6e4e4abe46753c63776b049be26b599ac39f0239a6fb5c77fd" Jan 22 05:44:24 crc kubenswrapper[4814]: E0122 05:44:24.713494 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80001a8451dc4d6e4e4abe46753c63776b049be26b599ac39f0239a6fb5c77fd\": container with ID starting with 80001a8451dc4d6e4e4abe46753c63776b049be26b599ac39f0239a6fb5c77fd not found: ID does not exist" containerID="80001a8451dc4d6e4e4abe46753c63776b049be26b599ac39f0239a6fb5c77fd" Jan 22 05:44:24 crc kubenswrapper[4814]: I0122 05:44:24.713513 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80001a8451dc4d6e4e4abe46753c63776b049be26b599ac39f0239a6fb5c77fd"} err="failed to get container status \"80001a8451dc4d6e4e4abe46753c63776b049be26b599ac39f0239a6fb5c77fd\": rpc error: code = NotFound desc = could not find container \"80001a8451dc4d6e4e4abe46753c63776b049be26b599ac39f0239a6fb5c77fd\": container with ID starting with 80001a8451dc4d6e4e4abe46753c63776b049be26b599ac39f0239a6fb5c77fd not found: ID does not exist" Jan 22 05:44:26 crc kubenswrapper[4814]: I0122 05:44:26.358323 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" path="/var/lib/kubelet/pods/28b2db45-ae9f-4fb7-89bb-e7225bfc33d8/volumes" Jan 22 05:44:26 crc kubenswrapper[4814]: I0122 05:44:26.634907 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-727qb"] Jan 22 05:44:26 crc kubenswrapper[4814]: I0122 05:44:26.635157 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-727qb" podUID="8fcb452e-d5b1-4d61-b043-554ab64b082c" containerName="registry-server" containerID="cri-o://994e0070b199affb4d939d9fa8b00eca6a434d30f1976551c2d9ccc325a980f0" gracePeriod=2 Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.141600 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.328493 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fcb452e-d5b1-4d61-b043-554ab64b082c-catalog-content\") pod \"8fcb452e-d5b1-4d61-b043-554ab64b082c\" (UID: \"8fcb452e-d5b1-4d61-b043-554ab64b082c\") " Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.328577 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fcb452e-d5b1-4d61-b043-554ab64b082c-utilities\") pod \"8fcb452e-d5b1-4d61-b043-554ab64b082c\" (UID: \"8fcb452e-d5b1-4d61-b043-554ab64b082c\") " Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.328945 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vm2wz\" (UniqueName: \"kubernetes.io/projected/8fcb452e-d5b1-4d61-b043-554ab64b082c-kube-api-access-vm2wz\") pod \"8fcb452e-d5b1-4d61-b043-554ab64b082c\" (UID: \"8fcb452e-d5b1-4d61-b043-554ab64b082c\") " Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.329960 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8fcb452e-d5b1-4d61-b043-554ab64b082c-utilities" (OuterVolumeSpecName: "utilities") pod "8fcb452e-d5b1-4d61-b043-554ab64b082c" (UID: "8fcb452e-d5b1-4d61-b043-554ab64b082c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.342914 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fcb452e-d5b1-4d61-b043-554ab64b082c-kube-api-access-vm2wz" (OuterVolumeSpecName: "kube-api-access-vm2wz") pod "8fcb452e-d5b1-4d61-b043-554ab64b082c" (UID: "8fcb452e-d5b1-4d61-b043-554ab64b082c"). InnerVolumeSpecName "kube-api-access-vm2wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.385207 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8fcb452e-d5b1-4d61-b043-554ab64b082c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8fcb452e-d5b1-4d61-b043-554ab64b082c" (UID: "8fcb452e-d5b1-4d61-b043-554ab64b082c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.431185 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8fcb452e-d5b1-4d61-b043-554ab64b082c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.431227 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8fcb452e-d5b1-4d61-b043-554ab64b082c-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.431238 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vm2wz\" (UniqueName: \"kubernetes.io/projected/8fcb452e-d5b1-4d61-b043-554ab64b082c-kube-api-access-vm2wz\") on node \"crc\" DevicePath \"\"" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.638333 4814 generic.go:334] "Generic (PLEG): container finished" podID="8fcb452e-d5b1-4d61-b043-554ab64b082c" containerID="994e0070b199affb4d939d9fa8b00eca6a434d30f1976551c2d9ccc325a980f0" exitCode=0 Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.638381 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-727qb" event={"ID":"8fcb452e-d5b1-4d61-b043-554ab64b082c","Type":"ContainerDied","Data":"994e0070b199affb4d939d9fa8b00eca6a434d30f1976551c2d9ccc325a980f0"} Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.638410 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-727qb" event={"ID":"8fcb452e-d5b1-4d61-b043-554ab64b082c","Type":"ContainerDied","Data":"a6d463993a016c4e06702e74820d4e0b36a1bdad1776c01d5861c242fe924130"} Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.638433 4814 scope.go:117] "RemoveContainer" containerID="994e0070b199affb4d939d9fa8b00eca6a434d30f1976551c2d9ccc325a980f0" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.638581 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-727qb" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.669915 4814 scope.go:117] "RemoveContainer" containerID="d26caf40016b6f6ba6b1895e292bdbf1b96c4ec38ce6a8d1f083b6b640e9afea" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.690964 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-727qb"] Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.704035 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-727qb"] Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.714407 4814 scope.go:117] "RemoveContainer" containerID="f25cc7d9dde32c3afb76baee49213cde5497eb1690f624647c2f50404b1dc6f5" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.776524 4814 scope.go:117] "RemoveContainer" containerID="994e0070b199affb4d939d9fa8b00eca6a434d30f1976551c2d9ccc325a980f0" Jan 22 05:44:27 crc kubenswrapper[4814]: E0122 05:44:27.776937 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"994e0070b199affb4d939d9fa8b00eca6a434d30f1976551c2d9ccc325a980f0\": container with ID starting with 994e0070b199affb4d939d9fa8b00eca6a434d30f1976551c2d9ccc325a980f0 not found: ID does not exist" containerID="994e0070b199affb4d939d9fa8b00eca6a434d30f1976551c2d9ccc325a980f0" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.776975 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"994e0070b199affb4d939d9fa8b00eca6a434d30f1976551c2d9ccc325a980f0"} err="failed to get container status \"994e0070b199affb4d939d9fa8b00eca6a434d30f1976551c2d9ccc325a980f0\": rpc error: code = NotFound desc = could not find container \"994e0070b199affb4d939d9fa8b00eca6a434d30f1976551c2d9ccc325a980f0\": container with ID starting with 994e0070b199affb4d939d9fa8b00eca6a434d30f1976551c2d9ccc325a980f0 not found: ID does not exist" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.777000 4814 scope.go:117] "RemoveContainer" containerID="d26caf40016b6f6ba6b1895e292bdbf1b96c4ec38ce6a8d1f083b6b640e9afea" Jan 22 05:44:27 crc kubenswrapper[4814]: E0122 05:44:27.777256 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d26caf40016b6f6ba6b1895e292bdbf1b96c4ec38ce6a8d1f083b6b640e9afea\": container with ID starting with d26caf40016b6f6ba6b1895e292bdbf1b96c4ec38ce6a8d1f083b6b640e9afea not found: ID does not exist" containerID="d26caf40016b6f6ba6b1895e292bdbf1b96c4ec38ce6a8d1f083b6b640e9afea" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.777285 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d26caf40016b6f6ba6b1895e292bdbf1b96c4ec38ce6a8d1f083b6b640e9afea"} err="failed to get container status \"d26caf40016b6f6ba6b1895e292bdbf1b96c4ec38ce6a8d1f083b6b640e9afea\": rpc error: code = NotFound desc = could not find container \"d26caf40016b6f6ba6b1895e292bdbf1b96c4ec38ce6a8d1f083b6b640e9afea\": container with ID starting with d26caf40016b6f6ba6b1895e292bdbf1b96c4ec38ce6a8d1f083b6b640e9afea not found: ID does not exist" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.777306 4814 scope.go:117] "RemoveContainer" containerID="f25cc7d9dde32c3afb76baee49213cde5497eb1690f624647c2f50404b1dc6f5" Jan 22 05:44:27 crc kubenswrapper[4814]: E0122 05:44:27.777541 4814 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"f25cc7d9dde32c3afb76baee49213cde5497eb1690f624647c2f50404b1dc6f5\": container with ID starting with f25cc7d9dde32c3afb76baee49213cde5497eb1690f624647c2f50404b1dc6f5 not found: ID does not exist" containerID="f25cc7d9dde32c3afb76baee49213cde5497eb1690f624647c2f50404b1dc6f5" Jan 22 05:44:27 crc kubenswrapper[4814]: I0122 05:44:27.777573 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f25cc7d9dde32c3afb76baee49213cde5497eb1690f624647c2f50404b1dc6f5"} err="failed to get container status \"f25cc7d9dde32c3afb76baee49213cde5497eb1690f624647c2f50404b1dc6f5\": rpc error: code = NotFound desc = could not find container \"f25cc7d9dde32c3afb76baee49213cde5497eb1690f624647c2f50404b1dc6f5\": container with ID starting with f25cc7d9dde32c3afb76baee49213cde5497eb1690f624647c2f50404b1dc6f5 not found: ID does not exist" Jan 22 05:44:28 crc kubenswrapper[4814]: I0122 05:44:28.354236 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fcb452e-d5b1-4d61-b043-554ab64b082c" path="/var/lib/kubelet/pods/8fcb452e-d5b1-4d61-b043-554ab64b082c/volumes" Jan 22 05:44:49 crc kubenswrapper[4814]: I0122 05:44:49.615566 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:44:49 crc kubenswrapper[4814]: I0122 05:44:49.616208 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:44:49 crc kubenswrapper[4814]: I0122 05:44:49.616325 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:44:49 crc kubenswrapper[4814]: I0122 05:44:49.618250 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 05:44:49 crc kubenswrapper[4814]: I0122 05:44:49.618328 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" gracePeriod=600 Jan 22 05:44:49 crc kubenswrapper[4814]: E0122 05:44:49.755186 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:44:49 crc kubenswrapper[4814]: I0122 05:44:49.879325 4814 generic.go:334] 
"Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" exitCode=0 Jan 22 05:44:49 crc kubenswrapper[4814]: I0122 05:44:49.879392 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683"} Jan 22 05:44:49 crc kubenswrapper[4814]: I0122 05:44:49.879438 4814 scope.go:117] "RemoveContainer" containerID="47eea733882c66d487823fb004595bb5b74593750bd6730a1b625e73c2be11e0" Jan 22 05:44:49 crc kubenswrapper[4814]: I0122 05:44:49.880509 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:44:49 crc kubenswrapper[4814]: E0122 05:44:49.881011 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.146678 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7"] Jan 22 05:45:00 crc kubenswrapper[4814]: E0122 05:45:00.147722 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" containerName="extract-content" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.147739 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" containerName="extract-content" Jan 22 05:45:00 crc kubenswrapper[4814]: E0122 05:45:00.147751 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fcb452e-d5b1-4d61-b043-554ab64b082c" containerName="extract-utilities" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.147759 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fcb452e-d5b1-4d61-b043-554ab64b082c" containerName="extract-utilities" Jan 22 05:45:00 crc kubenswrapper[4814]: E0122 05:45:00.147777 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fcb452e-d5b1-4d61-b043-554ab64b082c" containerName="extract-content" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.147788 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fcb452e-d5b1-4d61-b043-554ab64b082c" containerName="extract-content" Jan 22 05:45:00 crc kubenswrapper[4814]: E0122 05:45:00.147800 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fcb452e-d5b1-4d61-b043-554ab64b082c" containerName="registry-server" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.147807 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fcb452e-d5b1-4d61-b043-554ab64b082c" containerName="registry-server" Jan 22 05:45:00 crc kubenswrapper[4814]: E0122 05:45:00.147820 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" containerName="registry-server" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.147827 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" containerName="registry-server" Jan 22 05:45:00 
crc kubenswrapper[4814]: E0122 05:45:00.147850 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" containerName="extract-utilities" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.147857 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" containerName="extract-utilities" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.148040 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fcb452e-d5b1-4d61-b043-554ab64b082c" containerName="registry-server" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.148067 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="28b2db45-ae9f-4fb7-89bb-e7225bfc33d8" containerName="registry-server" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.149019 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.152068 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.152231 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.157212 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7"] Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.334399 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-config-volume\") pod \"collect-profiles-29484345-rstm7\" (UID: \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.334695 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-secret-volume\") pod \"collect-profiles-29484345-rstm7\" (UID: \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.334811 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gwjx\" (UniqueName: \"kubernetes.io/projected/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-kube-api-access-6gwjx\") pod \"collect-profiles-29484345-rstm7\" (UID: \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.437053 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gwjx\" (UniqueName: \"kubernetes.io/projected/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-kube-api-access-6gwjx\") pod \"collect-profiles-29484345-rstm7\" (UID: \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.437248 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-config-volume\") pod \"collect-profiles-29484345-rstm7\" (UID: \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.437278 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-secret-volume\") pod \"collect-profiles-29484345-rstm7\" (UID: \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.438300 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-config-volume\") pod \"collect-profiles-29484345-rstm7\" (UID: \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.449808 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-secret-volume\") pod \"collect-profiles-29484345-rstm7\" (UID: \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.456696 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gwjx\" (UniqueName: \"kubernetes.io/projected/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-kube-api-access-6gwjx\") pod \"collect-profiles-29484345-rstm7\" (UID: \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.474490 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.928264 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7"] Jan 22 05:45:00 crc kubenswrapper[4814]: I0122 05:45:00.993475 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" event={"ID":"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2","Type":"ContainerStarted","Data":"73d462b9f95c25a6bc30bca724bc7450a3fbca1a14e919b364aa7d25887d5e2d"} Jan 22 05:45:01 crc kubenswrapper[4814]: I0122 05:45:01.344544 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:45:01 crc kubenswrapper[4814]: E0122 05:45:01.345057 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:45:02 crc kubenswrapper[4814]: I0122 05:45:02.009790 4814 generic.go:334] "Generic (PLEG): container finished" podID="9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2" containerID="5ca5b842c285f83ff6c889c29c7b7e3fd8aa7b512c89391b3126656c1c613105" exitCode=0 Jan 22 05:45:02 crc kubenswrapper[4814]: I0122 05:45:02.009844 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" event={"ID":"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2","Type":"ContainerDied","Data":"5ca5b842c285f83ff6c889c29c7b7e3fd8aa7b512c89391b3126656c1c613105"} Jan 22 05:45:03 crc kubenswrapper[4814]: I0122 05:45:03.407789 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" Jan 22 05:45:03 crc kubenswrapper[4814]: I0122 05:45:03.505238 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-secret-volume\") pod \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\" (UID: \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\") " Jan 22 05:45:03 crc kubenswrapper[4814]: I0122 05:45:03.505406 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gwjx\" (UniqueName: \"kubernetes.io/projected/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-kube-api-access-6gwjx\") pod \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\" (UID: \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\") " Jan 22 05:45:03 crc kubenswrapper[4814]: I0122 05:45:03.505438 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-config-volume\") pod \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\" (UID: \"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2\") " Jan 22 05:45:03 crc kubenswrapper[4814]: I0122 05:45:03.507072 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-config-volume" (OuterVolumeSpecName: "config-volume") pod "9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2" (UID: "9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2"). 
InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:45:03 crc kubenswrapper[4814]: I0122 05:45:03.513767 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2" (UID: "9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:45:03 crc kubenswrapper[4814]: I0122 05:45:03.513828 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-kube-api-access-6gwjx" (OuterVolumeSpecName: "kube-api-access-6gwjx") pod "9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2" (UID: "9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2"). InnerVolumeSpecName "kube-api-access-6gwjx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:45:03 crc kubenswrapper[4814]: I0122 05:45:03.609428 4814 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 05:45:03 crc kubenswrapper[4814]: I0122 05:45:03.609468 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gwjx\" (UniqueName: \"kubernetes.io/projected/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-kube-api-access-6gwjx\") on node \"crc\" DevicePath \"\"" Jan 22 05:45:03 crc kubenswrapper[4814]: I0122 05:45:03.609494 4814 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 05:45:04 crc kubenswrapper[4814]: I0122 05:45:04.034948 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" event={"ID":"9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2","Type":"ContainerDied","Data":"73d462b9f95c25a6bc30bca724bc7450a3fbca1a14e919b364aa7d25887d5e2d"} Jan 22 05:45:04 crc kubenswrapper[4814]: I0122 05:45:04.035007 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73d462b9f95c25a6bc30bca724bc7450a3fbca1a14e919b364aa7d25887d5e2d" Jan 22 05:45:04 crc kubenswrapper[4814]: I0122 05:45:04.035494 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7" Jan 22 05:45:12 crc kubenswrapper[4814]: I0122 05:45:12.054144 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-4c34-account-create-update-bjrjx"] Jan 22 05:45:12 crc kubenswrapper[4814]: I0122 05:45:12.073030 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-4c34-account-create-update-bjrjx"] Jan 22 05:45:12 crc kubenswrapper[4814]: I0122 05:45:12.344995 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:45:12 crc kubenswrapper[4814]: E0122 05:45:12.345392 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:45:12 crc kubenswrapper[4814]: I0122 05:45:12.358307 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1607a334-0e23-4696-8a95-e364d28fca56" path="/var/lib/kubelet/pods/1607a334-0e23-4696-8a95-e364d28fca56/volumes" Jan 22 05:45:13 crc kubenswrapper[4814]: I0122 05:45:13.041484 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-b8mdj"] Jan 22 05:45:13 crc kubenswrapper[4814]: I0122 05:45:13.056725 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-6wgx7"] Jan 22 05:45:13 crc kubenswrapper[4814]: I0122 05:45:13.074010 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-533f-account-create-update-w8ktx"] Jan 22 05:45:13 crc kubenswrapper[4814]: I0122 05:45:13.082293 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-b8mdj"] Jan 22 05:45:13 crc kubenswrapper[4814]: I0122 05:45:13.093278 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-533f-account-create-update-w8ktx"] Jan 22 05:45:13 crc kubenswrapper[4814]: I0122 05:45:13.101692 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-6wgx7"] Jan 22 05:45:14 crc kubenswrapper[4814]: I0122 05:45:14.377098 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36d7b817-7906-48c6-ac41-d277d095531c" path="/var/lib/kubelet/pods/36d7b817-7906-48c6-ac41-d277d095531c/volumes" Jan 22 05:45:14 crc kubenswrapper[4814]: I0122 05:45:14.378342 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b21045ad-7166-4a40-abb8-8337cbcb1220" path="/var/lib/kubelet/pods/b21045ad-7166-4a40-abb8-8337cbcb1220/volumes" Jan 22 05:45:14 crc kubenswrapper[4814]: I0122 05:45:14.379108 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc87bfbf-1c0b-4502-8ad7-8913a6099bf0" path="/var/lib/kubelet/pods/fc87bfbf-1c0b-4502-8ad7-8913a6099bf0/volumes" Jan 22 05:45:16 crc kubenswrapper[4814]: I0122 05:45:16.142497 4814 generic.go:334] "Generic (PLEG): container finished" podID="7110037d-448b-4fb2-bc41-38848890a505" containerID="ceb660bec2e4b9b38ae4d43043573a22df9b16750ff0bd7762a7d1c049dd047e" exitCode=0 Jan 22 05:45:16 crc kubenswrapper[4814]: I0122 05:45:16.142932 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" event={"ID":"7110037d-448b-4fb2-bc41-38848890a505","Type":"ContainerDied","Data":"ceb660bec2e4b9b38ae4d43043573a22df9b16750ff0bd7762a7d1c049dd047e"} Jan 22 05:45:17 crc kubenswrapper[4814]: I0122 05:45:17.597117 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:45:17 crc kubenswrapper[4814]: I0122 05:45:17.769059 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-bootstrap-combined-ca-bundle\") pod \"7110037d-448b-4fb2-bc41-38848890a505\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " Jan 22 05:45:17 crc kubenswrapper[4814]: I0122 05:45:17.769132 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-inventory\") pod \"7110037d-448b-4fb2-bc41-38848890a505\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " Jan 22 05:45:17 crc kubenswrapper[4814]: I0122 05:45:17.769370 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-ssh-key-openstack-edpm-ipam\") pod \"7110037d-448b-4fb2-bc41-38848890a505\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " Jan 22 05:45:17 crc kubenswrapper[4814]: I0122 05:45:17.769489 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bl57k\" (UniqueName: \"kubernetes.io/projected/7110037d-448b-4fb2-bc41-38848890a505-kube-api-access-bl57k\") pod \"7110037d-448b-4fb2-bc41-38848890a505\" (UID: \"7110037d-448b-4fb2-bc41-38848890a505\") " Jan 22 05:45:17 crc kubenswrapper[4814]: I0122 05:45:17.777308 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7110037d-448b-4fb2-bc41-38848890a505-kube-api-access-bl57k" (OuterVolumeSpecName: "kube-api-access-bl57k") pod "7110037d-448b-4fb2-bc41-38848890a505" (UID: "7110037d-448b-4fb2-bc41-38848890a505"). InnerVolumeSpecName "kube-api-access-bl57k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:45:17 crc kubenswrapper[4814]: I0122 05:45:17.778130 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "7110037d-448b-4fb2-bc41-38848890a505" (UID: "7110037d-448b-4fb2-bc41-38848890a505"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:45:17 crc kubenswrapper[4814]: I0122 05:45:17.800247 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-inventory" (OuterVolumeSpecName: "inventory") pod "7110037d-448b-4fb2-bc41-38848890a505" (UID: "7110037d-448b-4fb2-bc41-38848890a505"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:45:17 crc kubenswrapper[4814]: I0122 05:45:17.805854 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "7110037d-448b-4fb2-bc41-38848890a505" (UID: "7110037d-448b-4fb2-bc41-38848890a505"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:45:17 crc kubenswrapper[4814]: I0122 05:45:17.874434 4814 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:45:17 crc kubenswrapper[4814]: I0122 05:45:17.874495 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:45:17 crc kubenswrapper[4814]: I0122 05:45:17.874516 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7110037d-448b-4fb2-bc41-38848890a505-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:45:17 crc kubenswrapper[4814]: I0122 05:45:17.874533 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bl57k\" (UniqueName: \"kubernetes.io/projected/7110037d-448b-4fb2-bc41-38848890a505-kube-api-access-bl57k\") on node \"crc\" DevicePath \"\"" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.031102 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-ecb3-account-create-update-72l64"] Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.040380 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-ecb3-account-create-update-72l64"] Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.049913 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-h6rcw"] Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.059038 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-h6rcw"] Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.167984 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" event={"ID":"7110037d-448b-4fb2-bc41-38848890a505","Type":"ContainerDied","Data":"b66c9d222912f03a605eb1877c7735851bb03041ce6bce9ccd531f37f0960ca9"} Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.168021 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b66c9d222912f03a605eb1877c7735851bb03041ce6bce9ccd531f37f0960ca9" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.168062 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-m46tk" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.264601 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k"] Jan 22 05:45:18 crc kubenswrapper[4814]: E0122 05:45:18.264999 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7110037d-448b-4fb2-bc41-38848890a505" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.265016 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7110037d-448b-4fb2-bc41-38848890a505" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 22 05:45:18 crc kubenswrapper[4814]: E0122 05:45:18.265042 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2" containerName="collect-profiles" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.265048 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2" containerName="collect-profiles" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.265204 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2" containerName="collect-profiles" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.265231 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="7110037d-448b-4fb2-bc41-38848890a505" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.265775 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.267664 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.268065 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.268518 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.268776 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.331337 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k"] Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.358964 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="508ac5a0-f0fa-4e7c-bb63-1fac194d7545" path="/var/lib/kubelet/pods/508ac5a0-f0fa-4e7c-bb63-1fac194d7545/volumes" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.359889 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec22ee17-7d0d-45fe-9059-0d8f059ee212" path="/var/lib/kubelet/pods/ec22ee17-7d0d-45fe-9059-0d8f059ee212/volumes" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.385995 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/489f5374-afc9-4b09-b59f-4b6f1cac86e7-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k\" (UID: 
\"489f5374-afc9-4b09-b59f-4b6f1cac86e7\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.386113 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4ptw\" (UniqueName: \"kubernetes.io/projected/489f5374-afc9-4b09-b59f-4b6f1cac86e7-kube-api-access-k4ptw\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k\" (UID: \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.386171 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/489f5374-afc9-4b09-b59f-4b6f1cac86e7-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k\" (UID: \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.487656 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/489f5374-afc9-4b09-b59f-4b6f1cac86e7-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k\" (UID: \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.487747 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4ptw\" (UniqueName: \"kubernetes.io/projected/489f5374-afc9-4b09-b59f-4b6f1cac86e7-kube-api-access-k4ptw\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k\" (UID: \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.487829 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/489f5374-afc9-4b09-b59f-4b6f1cac86e7-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k\" (UID: \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.496512 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/489f5374-afc9-4b09-b59f-4b6f1cac86e7-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k\" (UID: \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.504853 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/489f5374-afc9-4b09-b59f-4b6f1cac86e7-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k\" (UID: \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.511007 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4ptw\" (UniqueName: 
\"kubernetes.io/projected/489f5374-afc9-4b09-b59f-4b6f1cac86e7-kube-api-access-k4ptw\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k\" (UID: \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" Jan 22 05:45:18 crc kubenswrapper[4814]: I0122 05:45:18.585770 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" Jan 22 05:45:19 crc kubenswrapper[4814]: I0122 05:45:19.118196 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k"] Jan 22 05:45:19 crc kubenswrapper[4814]: I0122 05:45:19.177867 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" event={"ID":"489f5374-afc9-4b09-b59f-4b6f1cac86e7","Type":"ContainerStarted","Data":"95caef8f176ff7f2bd8da390b78505db90afc0040231ff508bc586e698912655"} Jan 22 05:45:20 crc kubenswrapper[4814]: I0122 05:45:20.186866 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" event={"ID":"489f5374-afc9-4b09-b59f-4b6f1cac86e7","Type":"ContainerStarted","Data":"ca0df813f75568ee18f6c47017afeeb0551a71b4afbc724f991e616a3c0874bb"} Jan 22 05:45:20 crc kubenswrapper[4814]: I0122 05:45:20.203927 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" podStartSLOduration=1.693249854 podStartE2EDuration="2.20391104s" podCreationTimestamp="2026-01-22 05:45:18 +0000 UTC" firstStartedPulling="2026-01-22 05:45:19.137659242 +0000 UTC m=+1605.221147447" lastFinishedPulling="2026-01-22 05:45:19.648320398 +0000 UTC m=+1605.731808633" observedRunningTime="2026-01-22 05:45:20.200196674 +0000 UTC m=+1606.283684889" watchObservedRunningTime="2026-01-22 05:45:20.20391104 +0000 UTC m=+1606.287399245" Jan 22 05:45:26 crc kubenswrapper[4814]: I0122 05:45:26.343602 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:45:26 crc kubenswrapper[4814]: E0122 05:45:26.344550 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:45:38 crc kubenswrapper[4814]: I0122 05:45:38.050317 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-bl945"] Jan 22 05:45:38 crc kubenswrapper[4814]: I0122 05:45:38.057947 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-bl945"] Jan 22 05:45:38 crc kubenswrapper[4814]: I0122 05:45:38.363447 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="729bdb41-4e49-4df6-a581-87fb0db6f3a0" path="/var/lib/kubelet/pods/729bdb41-4e49-4df6-a581-87fb0db6f3a0/volumes" Jan 22 05:45:40 crc kubenswrapper[4814]: I0122 05:45:40.344055 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:45:40 crc kubenswrapper[4814]: E0122 05:45:40.344523 4814 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:45:41 crc kubenswrapper[4814]: I0122 05:45:41.132643 4814 scope.go:117] "RemoveContainer" containerID="cff5bfa1fc1c89ade652bfadd0539603e46ff841066b9f5603b7c8b2790bd51e" Jan 22 05:45:41 crc kubenswrapper[4814]: I0122 05:45:41.170740 4814 scope.go:117] "RemoveContainer" containerID="535a38b9def1366acf26b3372d3ce95e22f01f531ac03cac77134ffad5b26896" Jan 22 05:45:41 crc kubenswrapper[4814]: I0122 05:45:41.232975 4814 scope.go:117] "RemoveContainer" containerID="d96df5a750cdac9bc537a894c31db6553bd3a66088c31bb1a3877b190e6766ac" Jan 22 05:45:41 crc kubenswrapper[4814]: I0122 05:45:41.285925 4814 scope.go:117] "RemoveContainer" containerID="c9e18213160b6ff64567d2f0b984472b4da06239c71bf9a0b7c2aa8b1f8bfd0a" Jan 22 05:45:41 crc kubenswrapper[4814]: I0122 05:45:41.314574 4814 scope.go:117] "RemoveContainer" containerID="8d2af743a7aca9ff498254942665cc91da4e8afba5629eec624cca4256c2d9c2" Jan 22 05:45:41 crc kubenswrapper[4814]: I0122 05:45:41.339386 4814 scope.go:117] "RemoveContainer" containerID="daf8289d134302fe9ca2a77b129432b464d66ddefc88c9b0427d879625f34116" Jan 22 05:45:41 crc kubenswrapper[4814]: I0122 05:45:41.379137 4814 scope.go:117] "RemoveContainer" containerID="e1e3a3b7c7353a02d60300e36dee3a8f84f35c8a52f88100867d6d486fe787f0" Jan 22 05:45:41 crc kubenswrapper[4814]: I0122 05:45:41.430732 4814 scope.go:117] "RemoveContainer" containerID="26dbc5f9cc31a682231fc1cd1937469cbf05a324720be13945a4eb88ea09f0fa" Jan 22 05:45:41 crc kubenswrapper[4814]: I0122 05:45:41.473661 4814 scope.go:117] "RemoveContainer" containerID="a317f0fc6065690b1500e088e81a5683312fea0a30967ff2a4143434f1ac7723" Jan 22 05:45:41 crc kubenswrapper[4814]: I0122 05:45:41.502398 4814 scope.go:117] "RemoveContainer" containerID="58fe8cb4ecd4275bf87ccf4585454cce95f776ae5ff14645419b68e9a32e73a7" Jan 22 05:45:41 crc kubenswrapper[4814]: I0122 05:45:41.522168 4814 scope.go:117] "RemoveContainer" containerID="676df7333b92940ecaed8508f407a997766a031d45a76178f1feea9c4fbc2141" Jan 22 05:45:42 crc kubenswrapper[4814]: I0122 05:45:42.051186 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-qwtsm"] Jan 22 05:45:42 crc kubenswrapper[4814]: I0122 05:45:42.066169 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-qwtsm"] Jan 22 05:45:42 crc kubenswrapper[4814]: I0122 05:45:42.363740 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9dc758e-58fd-4232-adf3-f9c9de238a9f" path="/var/lib/kubelet/pods/b9dc758e-58fd-4232-adf3-f9c9de238a9f/volumes" Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.051448 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-snjhg"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.075475 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-81fe-account-create-update-pqrz5"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.091581 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-81fe-account-create-update-pqrz5"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.101546 4814 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/cinder-db-create-zjkzp"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.110463 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-snjhg"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.118043 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-1385-account-create-update-nqh2m"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.125019 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-vb7mm"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.132135 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-zjkzp"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.139167 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-vb7mm"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.148552 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-1385-account-create-update-nqh2m"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.156593 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-9bcb-account-create-update-vvs4l"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.164660 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-9bcb-account-create-update-vvs4l"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.172145 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-4d21-account-create-update-vf2t4"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.179197 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-4d21-account-create-update-vf2t4"] Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.364552 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cb80edb-5104-4f39-b8a5-2c285bdc1ff1" path="/var/lib/kubelet/pods/0cb80edb-5104-4f39-b8a5-2c285bdc1ff1/volumes" Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.367801 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3631751f-0878-4972-a191-ff026a644832" path="/var/lib/kubelet/pods/3631751f-0878-4972-a191-ff026a644832/volumes" Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.370496 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac" path="/var/lib/kubelet/pods/4b16ab55-25e3-4a6e-9ffd-f286d9f2dfac/volumes" Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.372564 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8f36d3f-f478-4067-b71b-c799da7e07d9" path="/var/lib/kubelet/pods/a8f36d3f-f478-4067-b71b-c799da7e07d9/volumes" Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.375229 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9340e06-3a50-4f01-9314-44e5786484e1" path="/var/lib/kubelet/pods/a9340e06-3a50-4f01-9314-44e5786484e1/volumes" Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.378439 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc033b9f-32ee-44ed-85b1-4655c687ffe9" path="/var/lib/kubelet/pods/bc033b9f-32ee-44ed-85b1-4655c687ffe9/volumes" Jan 22 05:45:46 crc kubenswrapper[4814]: I0122 05:45:46.381381 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c71918d0-b384-4bf5-b8ee-a338ff72d9e9" path="/var/lib/kubelet/pods/c71918d0-b384-4bf5-b8ee-a338ff72d9e9/volumes" Jan 22 05:45:48 crc kubenswrapper[4814]: I0122 05:45:48.032443 4814 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-st6qn"] Jan 22 05:45:48 crc kubenswrapper[4814]: I0122 05:45:48.043350 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-st6qn"] Jan 22 05:45:48 crc kubenswrapper[4814]: I0122 05:45:48.357465 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65c32786-ef8f-4498-aaef-4ec1dcebc57d" path="/var/lib/kubelet/pods/65c32786-ef8f-4498-aaef-4ec1dcebc57d/volumes" Jan 22 05:45:51 crc kubenswrapper[4814]: I0122 05:45:51.344126 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:45:51 crc kubenswrapper[4814]: E0122 05:45:51.346214 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:45:52 crc kubenswrapper[4814]: I0122 05:45:52.040068 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-4lt4r"] Jan 22 05:45:52 crc kubenswrapper[4814]: I0122 05:45:52.050045 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-4lt4r"] Jan 22 05:45:52 crc kubenswrapper[4814]: I0122 05:45:52.363107 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979" path="/var/lib/kubelet/pods/b50b5b5c-6d5a-4c78-8a4a-0e8247d9a979/volumes" Jan 22 05:46:06 crc kubenswrapper[4814]: I0122 05:46:06.343702 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:46:06 crc kubenswrapper[4814]: E0122 05:46:06.344332 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:46:17 crc kubenswrapper[4814]: I0122 05:46:17.344117 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:46:17 crc kubenswrapper[4814]: E0122 05:46:17.345204 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:46:28 crc kubenswrapper[4814]: I0122 05:46:28.344237 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:46:28 crc kubenswrapper[4814]: E0122 05:46:28.345116 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:46:36 crc kubenswrapper[4814]: I0122 05:46:36.060735 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-rpl5x"] Jan 22 05:46:36 crc kubenswrapper[4814]: I0122 05:46:36.082454 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-rpl5x"] Jan 22 05:46:36 crc kubenswrapper[4814]: I0122 05:46:36.362017 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca" path="/var/lib/kubelet/pods/6058f7fe-4c73-4b1b-81c5-ffe8f0a830ca/volumes" Jan 22 05:46:40 crc kubenswrapper[4814]: I0122 05:46:40.344388 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:46:40 crc kubenswrapper[4814]: E0122 05:46:40.345361 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:46:41 crc kubenswrapper[4814]: I0122 05:46:41.727195 4814 scope.go:117] "RemoveContainer" containerID="5418634d65afda0304ddb4ca14c9a6ebce7c8c59a2dfb6c35929c9506170b69a" Jan 22 05:46:41 crc kubenswrapper[4814]: I0122 05:46:41.762765 4814 scope.go:117] "RemoveContainer" containerID="15ffb9cdfb3e5de75db0fc8522561149f16fc5f39a57f4270f3be516a8641af3" Jan 22 05:46:41 crc kubenswrapper[4814]: I0122 05:46:41.815704 4814 scope.go:117] "RemoveContainer" containerID="99c910021a2c2854670a7428a68423d92923a30fef890f0fb706885e74d6d973" Jan 22 05:46:41 crc kubenswrapper[4814]: I0122 05:46:41.851770 4814 scope.go:117] "RemoveContainer" containerID="326e264611b4534e81b504b48543ea7a88696075c43b9f4c3e976f8940bdbc8e" Jan 22 05:46:41 crc kubenswrapper[4814]: I0122 05:46:41.895863 4814 scope.go:117] "RemoveContainer" containerID="9ae96210948d6f5cdf7bfe3e94d50f77e834c4210ec97455b778d8b80c643b83" Jan 22 05:46:41 crc kubenswrapper[4814]: I0122 05:46:41.936808 4814 scope.go:117] "RemoveContainer" containerID="5cfd5a5c716f1bcc395c2f6b4d84a5b72a1218ad1ea5d63a591a9f5700c3f854" Jan 22 05:46:41 crc kubenswrapper[4814]: I0122 05:46:41.992329 4814 scope.go:117] "RemoveContainer" containerID="5fd8aef9f5b5e015e12b747ea333dbba33a7e80c266fde34e7e858499a61ea88" Jan 22 05:46:42 crc kubenswrapper[4814]: I0122 05:46:42.034966 4814 scope.go:117] "RemoveContainer" containerID="79e7df3871c956730177ee4046e1b78ca68df163829e50fa865215eb65440b0c" Jan 22 05:46:42 crc kubenswrapper[4814]: I0122 05:46:42.055019 4814 scope.go:117] "RemoveContainer" containerID="af71b291dc88af0eb592a02b111c9c780b02a7bdaced65b2a255365011bc3217" Jan 22 05:46:42 crc kubenswrapper[4814]: I0122 05:46:42.076748 4814 scope.go:117] "RemoveContainer" containerID="1e0c50551c24b420ee7d50dc7ac465d53663130cf8564ed6c0dec8119b48025a" Jan 22 05:46:42 crc kubenswrapper[4814]: I0122 05:46:42.096875 4814 scope.go:117] "RemoveContainer" containerID="7d1ab6d13de22aad2109f1a0b0ec69caa673eeee3dbdfa188178d9f5b6ac8265" Jan 22 05:46:49 crc kubenswrapper[4814]: I0122 05:46:49.058836 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/heat-db-sync-n8qnk"] Jan 22 05:46:49 crc kubenswrapper[4814]: I0122 05:46:49.070999 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-n8qnk"] Jan 22 05:46:49 crc kubenswrapper[4814]: I0122 05:46:49.081983 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-vd5qz"] Jan 22 05:46:49 crc kubenswrapper[4814]: I0122 05:46:49.091696 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-vd5qz"] Jan 22 05:46:49 crc kubenswrapper[4814]: I0122 05:46:49.098267 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-fvkql"] Jan 22 05:46:49 crc kubenswrapper[4814]: I0122 05:46:49.107140 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-fvkql"] Jan 22 05:46:50 crc kubenswrapper[4814]: I0122 05:46:50.355934 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="458612b9-c230-4db2-82d3-0a1b8fbe81f1" path="/var/lib/kubelet/pods/458612b9-c230-4db2-82d3-0a1b8fbe81f1/volumes" Jan 22 05:46:50 crc kubenswrapper[4814]: I0122 05:46:50.358065 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81f9b3f5-db07-49ef-933f-ef90f1c017f6" path="/var/lib/kubelet/pods/81f9b3f5-db07-49ef-933f-ef90f1c017f6/volumes" Jan 22 05:46:50 crc kubenswrapper[4814]: I0122 05:46:50.359539 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aad42960-bd7f-4a6a-9d2b-74cf1b7084a3" path="/var/lib/kubelet/pods/aad42960-bd7f-4a6a-9d2b-74cf1b7084a3/volumes" Jan 22 05:46:53 crc kubenswrapper[4814]: I0122 05:46:53.344286 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:46:53 crc kubenswrapper[4814]: E0122 05:46:53.345242 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:47:01 crc kubenswrapper[4814]: I0122 05:47:01.032432 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-rfq2m"] Jan 22 05:47:01 crc kubenswrapper[4814]: I0122 05:47:01.040997 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-rfq2m"] Jan 22 05:47:02 crc kubenswrapper[4814]: I0122 05:47:02.355579 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66536b14-9f47-4fe1-bc77-583a4ffff700" path="/var/lib/kubelet/pods/66536b14-9f47-4fe1-bc77-583a4ffff700/volumes" Jan 22 05:47:03 crc kubenswrapper[4814]: I0122 05:47:03.035027 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-mk8qf"] Jan 22 05:47:03 crc kubenswrapper[4814]: I0122 05:47:03.044214 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-mk8qf"] Jan 22 05:47:04 crc kubenswrapper[4814]: I0122 05:47:04.361005 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6af8690c-751e-4196-b6f4-db21950c5ec7" path="/var/lib/kubelet/pods/6af8690c-751e-4196-b6f4-db21950c5ec7/volumes" Jan 22 05:47:05 crc kubenswrapper[4814]: I0122 05:47:05.343757 4814 scope.go:117] "RemoveContainer" 
containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:47:05 crc kubenswrapper[4814]: E0122 05:47:05.344028 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:47:16 crc kubenswrapper[4814]: I0122 05:47:16.520559 4814 generic.go:334] "Generic (PLEG): container finished" podID="489f5374-afc9-4b09-b59f-4b6f1cac86e7" containerID="ca0df813f75568ee18f6c47017afeeb0551a71b4afbc724f991e616a3c0874bb" exitCode=0 Jan 22 05:47:16 crc kubenswrapper[4814]: I0122 05:47:16.521103 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" event={"ID":"489f5374-afc9-4b09-b59f-4b6f1cac86e7","Type":"ContainerDied","Data":"ca0df813f75568ee18f6c47017afeeb0551a71b4afbc724f991e616a3c0874bb"} Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.055527 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.102067 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/489f5374-afc9-4b09-b59f-4b6f1cac86e7-ssh-key-openstack-edpm-ipam\") pod \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\" (UID: \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\") " Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.102559 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/489f5374-afc9-4b09-b59f-4b6f1cac86e7-inventory\") pod \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\" (UID: \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\") " Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.102714 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4ptw\" (UniqueName: \"kubernetes.io/projected/489f5374-afc9-4b09-b59f-4b6f1cac86e7-kube-api-access-k4ptw\") pod \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\" (UID: \"489f5374-afc9-4b09-b59f-4b6f1cac86e7\") " Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.112944 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/489f5374-afc9-4b09-b59f-4b6f1cac86e7-kube-api-access-k4ptw" (OuterVolumeSpecName: "kube-api-access-k4ptw") pod "489f5374-afc9-4b09-b59f-4b6f1cac86e7" (UID: "489f5374-afc9-4b09-b59f-4b6f1cac86e7"). InnerVolumeSpecName "kube-api-access-k4ptw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.130991 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/489f5374-afc9-4b09-b59f-4b6f1cac86e7-inventory" (OuterVolumeSpecName: "inventory") pod "489f5374-afc9-4b09-b59f-4b6f1cac86e7" (UID: "489f5374-afc9-4b09-b59f-4b6f1cac86e7"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.146115 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/489f5374-afc9-4b09-b59f-4b6f1cac86e7-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "489f5374-afc9-4b09-b59f-4b6f1cac86e7" (UID: "489f5374-afc9-4b09-b59f-4b6f1cac86e7"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.204965 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4ptw\" (UniqueName: \"kubernetes.io/projected/489f5374-afc9-4b09-b59f-4b6f1cac86e7-kube-api-access-k4ptw\") on node \"crc\" DevicePath \"\"" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.204991 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/489f5374-afc9-4b09-b59f-4b6f1cac86e7-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.205002 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/489f5374-afc9-4b09-b59f-4b6f1cac86e7-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.547517 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" event={"ID":"489f5374-afc9-4b09-b59f-4b6f1cac86e7","Type":"ContainerDied","Data":"95caef8f176ff7f2bd8da390b78505db90afc0040231ff508bc586e698912655"} Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.547572 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95caef8f176ff7f2bd8da390b78505db90afc0040231ff508bc586e698912655" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.547621 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-fzs4k" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.650719 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5"] Jan 22 05:47:18 crc kubenswrapper[4814]: E0122 05:47:18.651350 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="489f5374-afc9-4b09-b59f-4b6f1cac86e7" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.651365 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="489f5374-afc9-4b09-b59f-4b6f1cac86e7" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.651561 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="489f5374-afc9-4b09-b59f-4b6f1cac86e7" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.652188 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.654329 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.655689 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.655883 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.656004 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.663389 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5"] Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.717914 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9j49w\" (UniqueName: \"kubernetes.io/projected/166e22f3-75a2-4437-906e-901594becc33-kube-api-access-9j49w\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5\" (UID: \"166e22f3-75a2-4437-906e-901594becc33\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.718009 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/166e22f3-75a2-4437-906e-901594becc33-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5\" (UID: \"166e22f3-75a2-4437-906e-901594becc33\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.718095 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/166e22f3-75a2-4437-906e-901594becc33-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5\" (UID: \"166e22f3-75a2-4437-906e-901594becc33\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.819785 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/166e22f3-75a2-4437-906e-901594becc33-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5\" (UID: \"166e22f3-75a2-4437-906e-901594becc33\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.819881 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9j49w\" (UniqueName: \"kubernetes.io/projected/166e22f3-75a2-4437-906e-901594becc33-kube-api-access-9j49w\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5\" (UID: \"166e22f3-75a2-4437-906e-901594becc33\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.819985 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/166e22f3-75a2-4437-906e-901594becc33-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5\" (UID: \"166e22f3-75a2-4437-906e-901594becc33\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.823192 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/166e22f3-75a2-4437-906e-901594becc33-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5\" (UID: \"166e22f3-75a2-4437-906e-901594becc33\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.824068 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/166e22f3-75a2-4437-906e-901594becc33-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5\" (UID: \"166e22f3-75a2-4437-906e-901594becc33\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.834469 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9j49w\" (UniqueName: \"kubernetes.io/projected/166e22f3-75a2-4437-906e-901594becc33-kube-api-access-9j49w\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5\" (UID: \"166e22f3-75a2-4437-906e-901594becc33\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" Jan 22 05:47:18 crc kubenswrapper[4814]: I0122 05:47:18.968585 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" Jan 22 05:47:19 crc kubenswrapper[4814]: I0122 05:47:19.558588 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5"] Jan 22 05:47:20 crc kubenswrapper[4814]: I0122 05:47:20.344246 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:47:20 crc kubenswrapper[4814]: E0122 05:47:20.344757 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:47:20 crc kubenswrapper[4814]: I0122 05:47:20.570065 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" event={"ID":"166e22f3-75a2-4437-906e-901594becc33","Type":"ContainerStarted","Data":"3654d2954ec0e7068aae250ba8b25d899cb792a487e782b91478f9e16f66490b"} Jan 22 05:47:22 crc kubenswrapper[4814]: I0122 05:47:22.594249 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" event={"ID":"166e22f3-75a2-4437-906e-901594becc33","Type":"ContainerStarted","Data":"5da23689112c971d7c552f6072214aa6b44bf99965851145ff909709505af24f"} Jan 22 05:47:22 crc kubenswrapper[4814]: I0122 05:47:22.622099 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" podStartSLOduration=2.197446729 podStartE2EDuration="4.622079609s" podCreationTimestamp="2026-01-22 05:47:18 +0000 UTC" firstStartedPulling="2026-01-22 05:47:19.572842333 +0000 UTC m=+1725.656330548" lastFinishedPulling="2026-01-22 05:47:21.997475213 +0000 UTC m=+1728.080963428" observedRunningTime="2026-01-22 05:47:22.61662106 +0000 UTC m=+1728.700109285" watchObservedRunningTime="2026-01-22 05:47:22.622079609 +0000 UTC m=+1728.705567824" Jan 22 05:47:33 crc kubenswrapper[4814]: I0122 05:47:33.343771 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:47:33 crc kubenswrapper[4814]: E0122 05:47:33.345507 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:47:42 crc kubenswrapper[4814]: I0122 05:47:42.314899 4814 scope.go:117] "RemoveContainer" containerID="3063722d9c63a1df0619c8dca81b89c685f38221550c110561ad2f90e7bbed23" Jan 22 05:47:42 crc kubenswrapper[4814]: I0122 05:47:42.340972 4814 scope.go:117] "RemoveContainer" containerID="f8da10c693fdef6d2e86919c1d80ab87c23d84c6c5362d0c4933a612f3d9545b" Jan 22 05:47:42 crc kubenswrapper[4814]: I0122 05:47:42.408559 4814 scope.go:117] "RemoveContainer" containerID="d7f6bb631b94bc34035a198577d446febd1954744ddfb1fb353107eb9b654ba5" Jan 22 05:47:42 crc kubenswrapper[4814]: I0122 05:47:42.471823 4814 scope.go:117] "RemoveContainer" containerID="4452e3c773b579c12751fa7425bc88f50efdbba9a5b14cee90cbcf566531a877" Jan 22 05:47:42 crc kubenswrapper[4814]: I0122 05:47:42.559778 4814 scope.go:117] "RemoveContainer" containerID="886b309406e11939d37f7f3b6d5f89b974eb0dfda41f0db283ab48142dede4ce" Jan 22 05:47:45 crc kubenswrapper[4814]: I0122 05:47:45.051550 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-5bec-account-create-update-br287"] Jan 22 05:47:45 crc kubenswrapper[4814]: I0122 05:47:45.065422 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-5bec-account-create-update-br287"] Jan 22 05:47:45 crc kubenswrapper[4814]: I0122 05:47:45.344063 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:47:45 crc kubenswrapper[4814]: E0122 05:47:45.344473 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:47:46 crc kubenswrapper[4814]: I0122 05:47:46.354225 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b993bf4-b353-4ca1-a01c-cfbae095a030" path="/var/lib/kubelet/pods/9b993bf4-b353-4ca1-a01c-cfbae095a030/volumes" Jan 22 05:47:47 crc kubenswrapper[4814]: I0122 05:47:47.050755 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-a091-account-create-update-pkqsv"] Jan 22 
05:47:47 crc kubenswrapper[4814]: I0122 05:47:47.066352 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-xlbkn"] Jan 22 05:47:47 crc kubenswrapper[4814]: I0122 05:47:47.077852 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-a091-account-create-update-pkqsv"] Jan 22 05:47:47 crc kubenswrapper[4814]: I0122 05:47:47.086620 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-xlbkn"] Jan 22 05:47:48 crc kubenswrapper[4814]: I0122 05:47:48.026327 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-e0c7-account-create-update-gr2s7"] Jan 22 05:47:48 crc kubenswrapper[4814]: I0122 05:47:48.033458 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-7dddp"] Jan 22 05:47:48 crc kubenswrapper[4814]: I0122 05:47:48.041210 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-tcp2s"] Jan 22 05:47:48 crc kubenswrapper[4814]: I0122 05:47:48.048319 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-e0c7-account-create-update-gr2s7"] Jan 22 05:47:48 crc kubenswrapper[4814]: I0122 05:47:48.055337 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-7dddp"] Jan 22 05:47:48 crc kubenswrapper[4814]: I0122 05:47:48.066289 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-tcp2s"] Jan 22 05:47:48 crc kubenswrapper[4814]: I0122 05:47:48.363079 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="347d9325-3d74-4146-94fe-c469e83043c9" path="/var/lib/kubelet/pods/347d9325-3d74-4146-94fe-c469e83043c9/volumes" Jan 22 05:47:48 crc kubenswrapper[4814]: I0122 05:47:48.366060 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b09adfa-27dd-431e-a1ad-4ddd7f308c8e" path="/var/lib/kubelet/pods/5b09adfa-27dd-431e-a1ad-4ddd7f308c8e/volumes" Jan 22 05:47:48 crc kubenswrapper[4814]: I0122 05:47:48.367154 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67b4bdd2-9667-497f-a85c-25d2c479e713" path="/var/lib/kubelet/pods/67b4bdd2-9667-497f-a85c-25d2c479e713/volumes" Jan 22 05:47:48 crc kubenswrapper[4814]: I0122 05:47:48.367800 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8975f4f8-ca9b-483c-9627-266538c2036f" path="/var/lib/kubelet/pods/8975f4f8-ca9b-483c-9627-266538c2036f/volumes" Jan 22 05:47:48 crc kubenswrapper[4814]: I0122 05:47:48.368711 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fee64fd2-2b4c-4b2c-9041-590c282c2e5b" path="/var/lib/kubelet/pods/fee64fd2-2b4c-4b2c-9041-590c282c2e5b/volumes" Jan 22 05:48:00 crc kubenswrapper[4814]: I0122 05:48:00.344052 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:48:00 crc kubenswrapper[4814]: E0122 05:48:00.344656 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:48:15 crc kubenswrapper[4814]: I0122 05:48:15.344291 4814 scope.go:117] "RemoveContainer" 
containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:48:15 crc kubenswrapper[4814]: E0122 05:48:15.345289 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:48:30 crc kubenswrapper[4814]: I0122 05:48:30.343996 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:48:30 crc kubenswrapper[4814]: E0122 05:48:30.345207 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:48:34 crc kubenswrapper[4814]: I0122 05:48:34.066117 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-z4pzm"] Jan 22 05:48:34 crc kubenswrapper[4814]: I0122 05:48:34.080537 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-z4pzm"] Jan 22 05:48:34 crc kubenswrapper[4814]: I0122 05:48:34.364457 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1fd8e88-42ab-43bb-8697-c7aebb8fec34" path="/var/lib/kubelet/pods/f1fd8e88-42ab-43bb-8697-c7aebb8fec34/volumes" Jan 22 05:48:42 crc kubenswrapper[4814]: I0122 05:48:42.344905 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:48:42 crc kubenswrapper[4814]: E0122 05:48:42.346309 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:48:42 crc kubenswrapper[4814]: I0122 05:48:42.656041 4814 scope.go:117] "RemoveContainer" containerID="b54f70c21d7b84c566a44ab516c7816eaf57e58dce24c3e7b2dcee555e9ace84" Jan 22 05:48:42 crc kubenswrapper[4814]: I0122 05:48:42.683451 4814 scope.go:117] "RemoveContainer" containerID="e288cb59351fc005b9dba1c8df7b2bedd54c4b88133f7fe3756bfa12bbcd2cd6" Jan 22 05:48:42 crc kubenswrapper[4814]: I0122 05:48:42.734954 4814 scope.go:117] "RemoveContainer" containerID="3bd841d6331c4c9c4b6db623e622e282424b2a2bf7294bc87b9a3559e4574fcc" Jan 22 05:48:42 crc kubenswrapper[4814]: I0122 05:48:42.774423 4814 scope.go:117] "RemoveContainer" containerID="92b63d94cecd3fc08e467870add701dee2bc20eb579b6e3e79f689710dc87e7a" Jan 22 05:48:42 crc kubenswrapper[4814]: I0122 05:48:42.815316 4814 scope.go:117] "RemoveContainer" containerID="d2a0cb12391ffc95eec010198852e33f884bb4a62d58fc1194b37b10e2546a0b" Jan 22 05:48:42 crc kubenswrapper[4814]: I0122 05:48:42.853948 4814 scope.go:117] "RemoveContainer" 
containerID="13226bc0b5edea2b2fb1487c9460153283f100abd66290938c62b3e22ec4a84a" Jan 22 05:48:42 crc kubenswrapper[4814]: I0122 05:48:42.894760 4814 scope.go:117] "RemoveContainer" containerID="eb0c548282539e794c3ee29d0f306e6c108d20301212e4cb421f3fecc21065a8" Jan 22 05:48:47 crc kubenswrapper[4814]: I0122 05:48:47.239958 4814 generic.go:334] "Generic (PLEG): container finished" podID="166e22f3-75a2-4437-906e-901594becc33" containerID="5da23689112c971d7c552f6072214aa6b44bf99965851145ff909709505af24f" exitCode=0 Jan 22 05:48:47 crc kubenswrapper[4814]: I0122 05:48:47.240086 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" event={"ID":"166e22f3-75a2-4437-906e-901594becc33","Type":"ContainerDied","Data":"5da23689112c971d7c552f6072214aa6b44bf99965851145ff909709505af24f"} Jan 22 05:48:48 crc kubenswrapper[4814]: I0122 05:48:48.826161 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" Jan 22 05:48:48 crc kubenswrapper[4814]: I0122 05:48:48.956450 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9j49w\" (UniqueName: \"kubernetes.io/projected/166e22f3-75a2-4437-906e-901594becc33-kube-api-access-9j49w\") pod \"166e22f3-75a2-4437-906e-901594becc33\" (UID: \"166e22f3-75a2-4437-906e-901594becc33\") " Jan 22 05:48:48 crc kubenswrapper[4814]: I0122 05:48:48.956549 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/166e22f3-75a2-4437-906e-901594becc33-ssh-key-openstack-edpm-ipam\") pod \"166e22f3-75a2-4437-906e-901594becc33\" (UID: \"166e22f3-75a2-4437-906e-901594becc33\") " Jan 22 05:48:48 crc kubenswrapper[4814]: I0122 05:48:48.956712 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/166e22f3-75a2-4437-906e-901594becc33-inventory\") pod \"166e22f3-75a2-4437-906e-901594becc33\" (UID: \"166e22f3-75a2-4437-906e-901594becc33\") " Jan 22 05:48:48 crc kubenswrapper[4814]: I0122 05:48:48.964851 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/166e22f3-75a2-4437-906e-901594becc33-kube-api-access-9j49w" (OuterVolumeSpecName: "kube-api-access-9j49w") pod "166e22f3-75a2-4437-906e-901594becc33" (UID: "166e22f3-75a2-4437-906e-901594becc33"). InnerVolumeSpecName "kube-api-access-9j49w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:48:48 crc kubenswrapper[4814]: I0122 05:48:48.984969 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/166e22f3-75a2-4437-906e-901594becc33-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "166e22f3-75a2-4437-906e-901594becc33" (UID: "166e22f3-75a2-4437-906e-901594becc33"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:48:48 crc kubenswrapper[4814]: I0122 05:48:48.985362 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/166e22f3-75a2-4437-906e-901594becc33-inventory" (OuterVolumeSpecName: "inventory") pod "166e22f3-75a2-4437-906e-901594becc33" (UID: "166e22f3-75a2-4437-906e-901594becc33"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.059004 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/166e22f3-75a2-4437-906e-901594becc33-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.059036 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9j49w\" (UniqueName: \"kubernetes.io/projected/166e22f3-75a2-4437-906e-901594becc33-kube-api-access-9j49w\") on node \"crc\" DevicePath \"\"" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.059049 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/166e22f3-75a2-4437-906e-901594becc33-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.279367 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" event={"ID":"166e22f3-75a2-4437-906e-901594becc33","Type":"ContainerDied","Data":"3654d2954ec0e7068aae250ba8b25d899cb792a487e782b91478f9e16f66490b"} Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.279427 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3654d2954ec0e7068aae250ba8b25d899cb792a487e782b91478f9e16f66490b" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.279506 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-hlbd5" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.380257 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g"] Jan 22 05:48:49 crc kubenswrapper[4814]: E0122 05:48:49.380996 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="166e22f3-75a2-4437-906e-901594becc33" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.381016 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="166e22f3-75a2-4437-906e-901594becc33" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.381242 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="166e22f3-75a2-4437-906e-901594becc33" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.382008 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.385661 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.385854 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.386029 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.386521 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.396737 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g"] Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.568112 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a910d065-4dec-45e9-a81c-b06598b7b836-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g\" (UID: \"a910d065-4dec-45e9-a81c-b06598b7b836\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.568416 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a910d065-4dec-45e9-a81c-b06598b7b836-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g\" (UID: \"a910d065-4dec-45e9-a81c-b06598b7b836\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.568766 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6tfb\" (UniqueName: \"kubernetes.io/projected/a910d065-4dec-45e9-a81c-b06598b7b836-kube-api-access-v6tfb\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g\" (UID: \"a910d065-4dec-45e9-a81c-b06598b7b836\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.670906 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a910d065-4dec-45e9-a81c-b06598b7b836-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g\" (UID: \"a910d065-4dec-45e9-a81c-b06598b7b836\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.671080 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6tfb\" (UniqueName: \"kubernetes.io/projected/a910d065-4dec-45e9-a81c-b06598b7b836-kube-api-access-v6tfb\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g\" (UID: \"a910d065-4dec-45e9-a81c-b06598b7b836\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.671245 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/a910d065-4dec-45e9-a81c-b06598b7b836-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g\" (UID: \"a910d065-4dec-45e9-a81c-b06598b7b836\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.677515 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a910d065-4dec-45e9-a81c-b06598b7b836-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g\" (UID: \"a910d065-4dec-45e9-a81c-b06598b7b836\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.681539 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a910d065-4dec-45e9-a81c-b06598b7b836-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g\" (UID: \"a910d065-4dec-45e9-a81c-b06598b7b836\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" Jan 22 05:48:49 crc kubenswrapper[4814]: I0122 05:48:49.708921 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6tfb\" (UniqueName: \"kubernetes.io/projected/a910d065-4dec-45e9-a81c-b06598b7b836-kube-api-access-v6tfb\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g\" (UID: \"a910d065-4dec-45e9-a81c-b06598b7b836\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" Jan 22 05:48:50 crc kubenswrapper[4814]: I0122 05:48:50.000976 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" Jan 22 05:48:50 crc kubenswrapper[4814]: I0122 05:48:50.604297 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g"] Jan 22 05:48:51 crc kubenswrapper[4814]: I0122 05:48:51.300447 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" event={"ID":"a910d065-4dec-45e9-a81c-b06598b7b836","Type":"ContainerStarted","Data":"96f5ceed586158360b794e8761ca1c213120d0292f0de18505e9bea5912a3469"} Jan 22 05:48:51 crc kubenswrapper[4814]: I0122 05:48:51.301650 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" event={"ID":"a910d065-4dec-45e9-a81c-b06598b7b836","Type":"ContainerStarted","Data":"0e1f64e1d280251b4f8d50f5b1c56094e59bad71b6f3ba44458d1783684ed239"} Jan 22 05:48:51 crc kubenswrapper[4814]: I0122 05:48:51.332717 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" podStartSLOduration=1.8922858470000001 podStartE2EDuration="2.33269094s" podCreationTimestamp="2026-01-22 05:48:49 +0000 UTC" firstStartedPulling="2026-01-22 05:48:50.609383408 +0000 UTC m=+1816.692871653" lastFinishedPulling="2026-01-22 05:48:51.049788501 +0000 UTC m=+1817.133276746" observedRunningTime="2026-01-22 05:48:51.318165339 +0000 UTC m=+1817.401653554" watchObservedRunningTime="2026-01-22 05:48:51.33269094 +0000 UTC m=+1817.416179195" Jan 22 05:48:55 crc kubenswrapper[4814]: I0122 05:48:55.344834 4814 scope.go:117] "RemoveContainer" 
containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:48:55 crc kubenswrapper[4814]: E0122 05:48:55.345822 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:48:57 crc kubenswrapper[4814]: I0122 05:48:57.359205 4814 generic.go:334] "Generic (PLEG): container finished" podID="a910d065-4dec-45e9-a81c-b06598b7b836" containerID="96f5ceed586158360b794e8761ca1c213120d0292f0de18505e9bea5912a3469" exitCode=0 Jan 22 05:48:57 crc kubenswrapper[4814]: I0122 05:48:57.359331 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" event={"ID":"a910d065-4dec-45e9-a81c-b06598b7b836","Type":"ContainerDied","Data":"96f5ceed586158360b794e8761ca1c213120d0292f0de18505e9bea5912a3469"} Jan 22 05:48:58 crc kubenswrapper[4814]: I0122 05:48:58.046400 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-57rkp"] Jan 22 05:48:58 crc kubenswrapper[4814]: I0122 05:48:58.055726 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-57rkp"] Jan 22 05:48:58 crc kubenswrapper[4814]: I0122 05:48:58.366387 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76b670b3-c66f-4b78-a355-951299de4283" path="/var/lib/kubelet/pods/76b670b3-c66f-4b78-a355-951299de4283/volumes" Jan 22 05:48:58 crc kubenswrapper[4814]: I0122 05:48:58.864965 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" Jan 22 05:48:58 crc kubenswrapper[4814]: I0122 05:48:58.971420 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a910d065-4dec-45e9-a81c-b06598b7b836-ssh-key-openstack-edpm-ipam\") pod \"a910d065-4dec-45e9-a81c-b06598b7b836\" (UID: \"a910d065-4dec-45e9-a81c-b06598b7b836\") " Jan 22 05:48:58 crc kubenswrapper[4814]: I0122 05:48:58.971471 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6tfb\" (UniqueName: \"kubernetes.io/projected/a910d065-4dec-45e9-a81c-b06598b7b836-kube-api-access-v6tfb\") pod \"a910d065-4dec-45e9-a81c-b06598b7b836\" (UID: \"a910d065-4dec-45e9-a81c-b06598b7b836\") " Jan 22 05:48:58 crc kubenswrapper[4814]: I0122 05:48:58.971593 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a910d065-4dec-45e9-a81c-b06598b7b836-inventory\") pod \"a910d065-4dec-45e9-a81c-b06598b7b836\" (UID: \"a910d065-4dec-45e9-a81c-b06598b7b836\") " Jan 22 05:48:58 crc kubenswrapper[4814]: I0122 05:48:58.979832 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a910d065-4dec-45e9-a81c-b06598b7b836-kube-api-access-v6tfb" (OuterVolumeSpecName: "kube-api-access-v6tfb") pod "a910d065-4dec-45e9-a81c-b06598b7b836" (UID: "a910d065-4dec-45e9-a81c-b06598b7b836"). InnerVolumeSpecName "kube-api-access-v6tfb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.006158 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a910d065-4dec-45e9-a81c-b06598b7b836-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "a910d065-4dec-45e9-a81c-b06598b7b836" (UID: "a910d065-4dec-45e9-a81c-b06598b7b836"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.017412 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a910d065-4dec-45e9-a81c-b06598b7b836-inventory" (OuterVolumeSpecName: "inventory") pod "a910d065-4dec-45e9-a81c-b06598b7b836" (UID: "a910d065-4dec-45e9-a81c-b06598b7b836"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.073351 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a910d065-4dec-45e9-a81c-b06598b7b836-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.073385 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a910d065-4dec-45e9-a81c-b06598b7b836-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.073397 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6tfb\" (UniqueName: \"kubernetes.io/projected/a910d065-4dec-45e9-a81c-b06598b7b836-kube-api-access-v6tfb\") on node \"crc\" DevicePath \"\"" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.381293 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" event={"ID":"a910d065-4dec-45e9-a81c-b06598b7b836","Type":"ContainerDied","Data":"0e1f64e1d280251b4f8d50f5b1c56094e59bad71b6f3ba44458d1783684ed239"} Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.381352 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e1f64e1d280251b4f8d50f5b1c56094e59bad71b6f3ba44458d1783684ed239" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.381397 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-zmv8g" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.472464 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj"] Jan 22 05:48:59 crc kubenswrapper[4814]: E0122 05:48:59.472918 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a910d065-4dec-45e9-a81c-b06598b7b836" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.472938 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a910d065-4dec-45e9-a81c-b06598b7b836" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.473147 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a910d065-4dec-45e9-a81c-b06598b7b836" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.473870 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.481015 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.481195 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.481350 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.486313 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.486650 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj"] Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.583792 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zn5qx\" (UniqueName: \"kubernetes.io/projected/1cc5906e-5b80-4a15-81dd-56c869d9004b-kube-api-access-zn5qx\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-b2czj\" (UID: \"1cc5906e-5b80-4a15-81dd-56c869d9004b\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.584151 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cc5906e-5b80-4a15-81dd-56c869d9004b-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-b2czj\" (UID: \"1cc5906e-5b80-4a15-81dd-56c869d9004b\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.584221 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1cc5906e-5b80-4a15-81dd-56c869d9004b-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-b2czj\" (UID: \"1cc5906e-5b80-4a15-81dd-56c869d9004b\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 
05:48:59.686702 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cc5906e-5b80-4a15-81dd-56c869d9004b-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-b2czj\" (UID: \"1cc5906e-5b80-4a15-81dd-56c869d9004b\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.686873 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1cc5906e-5b80-4a15-81dd-56c869d9004b-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-b2czj\" (UID: \"1cc5906e-5b80-4a15-81dd-56c869d9004b\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.686978 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zn5qx\" (UniqueName: \"kubernetes.io/projected/1cc5906e-5b80-4a15-81dd-56c869d9004b-kube-api-access-zn5qx\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-b2czj\" (UID: \"1cc5906e-5b80-4a15-81dd-56c869d9004b\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.690407 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1cc5906e-5b80-4a15-81dd-56c869d9004b-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-b2czj\" (UID: \"1cc5906e-5b80-4a15-81dd-56c869d9004b\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.693316 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cc5906e-5b80-4a15-81dd-56c869d9004b-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-b2czj\" (UID: \"1cc5906e-5b80-4a15-81dd-56c869d9004b\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.716465 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zn5qx\" (UniqueName: \"kubernetes.io/projected/1cc5906e-5b80-4a15-81dd-56c869d9004b-kube-api-access-zn5qx\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-b2czj\" (UID: \"1cc5906e-5b80-4a15-81dd-56c869d9004b\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" Jan 22 05:48:59 crc kubenswrapper[4814]: I0122 05:48:59.792056 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" Jan 22 05:49:00 crc kubenswrapper[4814]: I0122 05:49:00.033183 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-46576"] Jan 22 05:49:00 crc kubenswrapper[4814]: I0122 05:49:00.041716 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-46576"] Jan 22 05:49:00 crc kubenswrapper[4814]: I0122 05:49:00.354069 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7881f3e-36b2-4a90-85be-291e584e8e56" path="/var/lib/kubelet/pods/e7881f3e-36b2-4a90-85be-291e584e8e56/volumes" Jan 22 05:49:00 crc kubenswrapper[4814]: I0122 05:49:00.377071 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj"] Jan 22 05:49:01 crc kubenswrapper[4814]: I0122 05:49:01.401927 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" event={"ID":"1cc5906e-5b80-4a15-81dd-56c869d9004b","Type":"ContainerStarted","Data":"b324a9a388ac47d2d145786449d9f5fb5a0c851f5fd1264b95ba6c577762021b"} Jan 22 05:49:01 crc kubenswrapper[4814]: I0122 05:49:01.402347 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" event={"ID":"1cc5906e-5b80-4a15-81dd-56c869d9004b","Type":"ContainerStarted","Data":"6174ddb62a8434f94c771b45f9add8ef305a1474f71dad0eb79a308eb6f0694a"} Jan 22 05:49:10 crc kubenswrapper[4814]: I0122 05:49:10.344293 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:49:10 crc kubenswrapper[4814]: E0122 05:49:10.346116 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:49:21 crc kubenswrapper[4814]: I0122 05:49:21.343494 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:49:21 crc kubenswrapper[4814]: E0122 05:49:21.345179 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:49:33 crc kubenswrapper[4814]: I0122 05:49:33.344430 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:49:33 crc kubenswrapper[4814]: E0122 05:49:33.345365 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" 
podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:49:43 crc kubenswrapper[4814]: I0122 05:49:43.055887 4814 scope.go:117] "RemoveContainer" containerID="f8cd8b6a23fdcc669e4ac28d7c80894a0d4d6c75a2f654ec2106e72869d0288e" Jan 22 05:49:43 crc kubenswrapper[4814]: I0122 05:49:43.068889 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" podStartSLOduration=43.614025744 podStartE2EDuration="44.068861825s" podCreationTimestamp="2026-01-22 05:48:59 +0000 UTC" firstStartedPulling="2026-01-22 05:49:00.389912529 +0000 UTC m=+1826.473400754" lastFinishedPulling="2026-01-22 05:49:00.84474858 +0000 UTC m=+1826.928236835" observedRunningTime="2026-01-22 05:49:01.42506363 +0000 UTC m=+1827.508551845" watchObservedRunningTime="2026-01-22 05:49:43.068861825 +0000 UTC m=+1869.152350050" Jan 22 05:49:43 crc kubenswrapper[4814]: I0122 05:49:43.069228 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-b6chm"] Jan 22 05:49:43 crc kubenswrapper[4814]: I0122 05:49:43.084166 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-b6chm"] Jan 22 05:49:43 crc kubenswrapper[4814]: I0122 05:49:43.088864 4814 scope.go:117] "RemoveContainer" containerID="8822399f550af14f4cf7ad7419db2561540311632754834741591b345125c9ac" Jan 22 05:49:44 crc kubenswrapper[4814]: I0122 05:49:44.349430 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:49:44 crc kubenswrapper[4814]: E0122 05:49:44.350269 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:49:44 crc kubenswrapper[4814]: I0122 05:49:44.355656 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79b7af13-1acf-421b-914f-2f8fd797cbe3" path="/var/lib/kubelet/pods/79b7af13-1acf-421b-914f-2f8fd797cbe3/volumes" Jan 22 05:49:46 crc kubenswrapper[4814]: I0122 05:49:46.828707 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-s4pdg"] Jan 22 05:49:46 crc kubenswrapper[4814]: I0122 05:49:46.831415 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:46 crc kubenswrapper[4814]: I0122 05:49:46.864050 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s4pdg"] Jan 22 05:49:46 crc kubenswrapper[4814]: I0122 05:49:46.867198 4814 generic.go:334] "Generic (PLEG): container finished" podID="1cc5906e-5b80-4a15-81dd-56c869d9004b" containerID="b324a9a388ac47d2d145786449d9f5fb5a0c851f5fd1264b95ba6c577762021b" exitCode=0 Jan 22 05:49:46 crc kubenswrapper[4814]: I0122 05:49:46.867488 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" event={"ID":"1cc5906e-5b80-4a15-81dd-56c869d9004b","Type":"ContainerDied","Data":"b324a9a388ac47d2d145786449d9f5fb5a0c851f5fd1264b95ba6c577762021b"} Jan 22 05:49:46 crc kubenswrapper[4814]: I0122 05:49:46.958649 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gk768\" (UniqueName: \"kubernetes.io/projected/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-kube-api-access-gk768\") pod \"redhat-marketplace-s4pdg\" (UID: \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\") " pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:46 crc kubenswrapper[4814]: I0122 05:49:46.958697 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-catalog-content\") pod \"redhat-marketplace-s4pdg\" (UID: \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\") " pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:46 crc kubenswrapper[4814]: I0122 05:49:46.958720 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-utilities\") pod \"redhat-marketplace-s4pdg\" (UID: \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\") " pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:47 crc kubenswrapper[4814]: I0122 05:49:47.060573 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gk768\" (UniqueName: \"kubernetes.io/projected/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-kube-api-access-gk768\") pod \"redhat-marketplace-s4pdg\" (UID: \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\") " pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:47 crc kubenswrapper[4814]: I0122 05:49:47.060643 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-catalog-content\") pod \"redhat-marketplace-s4pdg\" (UID: \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\") " pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:47 crc kubenswrapper[4814]: I0122 05:49:47.060672 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-utilities\") pod \"redhat-marketplace-s4pdg\" (UID: \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\") " pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:47 crc kubenswrapper[4814]: I0122 05:49:47.061114 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-catalog-content\") pod \"redhat-marketplace-s4pdg\" 
(UID: \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\") " pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:47 crc kubenswrapper[4814]: I0122 05:49:47.061169 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-utilities\") pod \"redhat-marketplace-s4pdg\" (UID: \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\") " pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:47 crc kubenswrapper[4814]: I0122 05:49:47.078418 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gk768\" (UniqueName: \"kubernetes.io/projected/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-kube-api-access-gk768\") pod \"redhat-marketplace-s4pdg\" (UID: \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\") " pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:47 crc kubenswrapper[4814]: I0122 05:49:47.164481 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:47 crc kubenswrapper[4814]: I0122 05:49:47.642204 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s4pdg"] Jan 22 05:49:47 crc kubenswrapper[4814]: I0122 05:49:47.880134 4814 generic.go:334] "Generic (PLEG): container finished" podID="4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" containerID="9dc785f5ac587fb77fd1b7248b074fd1bbf9d3c928b0ad129afcc110df539014" exitCode=0 Jan 22 05:49:47 crc kubenswrapper[4814]: I0122 05:49:47.881215 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4pdg" event={"ID":"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b","Type":"ContainerDied","Data":"9dc785f5ac587fb77fd1b7248b074fd1bbf9d3c928b0ad129afcc110df539014"} Jan 22 05:49:47 crc kubenswrapper[4814]: I0122 05:49:47.881241 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4pdg" event={"ID":"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b","Type":"ContainerStarted","Data":"538bc9797a0bf61e94008c384e9da2ce013dab544c8903716ca15780a81e19a9"} Jan 22 05:49:47 crc kubenswrapper[4814]: I0122 05:49:47.882188 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.263740 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.382195 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cc5906e-5b80-4a15-81dd-56c869d9004b-inventory\") pod \"1cc5906e-5b80-4a15-81dd-56c869d9004b\" (UID: \"1cc5906e-5b80-4a15-81dd-56c869d9004b\") " Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.382470 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1cc5906e-5b80-4a15-81dd-56c869d9004b-ssh-key-openstack-edpm-ipam\") pod \"1cc5906e-5b80-4a15-81dd-56c869d9004b\" (UID: \"1cc5906e-5b80-4a15-81dd-56c869d9004b\") " Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.382589 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zn5qx\" (UniqueName: \"kubernetes.io/projected/1cc5906e-5b80-4a15-81dd-56c869d9004b-kube-api-access-zn5qx\") pod \"1cc5906e-5b80-4a15-81dd-56c869d9004b\" (UID: \"1cc5906e-5b80-4a15-81dd-56c869d9004b\") " Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.390906 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cc5906e-5b80-4a15-81dd-56c869d9004b-kube-api-access-zn5qx" (OuterVolumeSpecName: "kube-api-access-zn5qx") pod "1cc5906e-5b80-4a15-81dd-56c869d9004b" (UID: "1cc5906e-5b80-4a15-81dd-56c869d9004b"). InnerVolumeSpecName "kube-api-access-zn5qx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.413927 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cc5906e-5b80-4a15-81dd-56c869d9004b-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1cc5906e-5b80-4a15-81dd-56c869d9004b" (UID: "1cc5906e-5b80-4a15-81dd-56c869d9004b"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.420833 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cc5906e-5b80-4a15-81dd-56c869d9004b-inventory" (OuterVolumeSpecName: "inventory") pod "1cc5906e-5b80-4a15-81dd-56c869d9004b" (UID: "1cc5906e-5b80-4a15-81dd-56c869d9004b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.489258 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1cc5906e-5b80-4a15-81dd-56c869d9004b-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.489288 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1cc5906e-5b80-4a15-81dd-56c869d9004b-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.489300 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zn5qx\" (UniqueName: \"kubernetes.io/projected/1cc5906e-5b80-4a15-81dd-56c869d9004b-kube-api-access-zn5qx\") on node \"crc\" DevicePath \"\"" Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.891601 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" event={"ID":"1cc5906e-5b80-4a15-81dd-56c869d9004b","Type":"ContainerDied","Data":"6174ddb62a8434f94c771b45f9add8ef305a1474f71dad0eb79a308eb6f0694a"} Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.891669 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-b2czj" Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.891674 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6174ddb62a8434f94c771b45f9add8ef305a1474f71dad0eb79a308eb6f0694a" Jan 22 05:49:48 crc kubenswrapper[4814]: I0122 05:49:48.895123 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4pdg" event={"ID":"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b","Type":"ContainerStarted","Data":"b9b688f7884e5fc97e4cde948dbce32a53084bb9d4b21107098c6c0cba566574"} Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.007004 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl"] Jan 22 05:49:49 crc kubenswrapper[4814]: E0122 05:49:49.008170 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cc5906e-5b80-4a15-81dd-56c869d9004b" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.008269 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cc5906e-5b80-4a15-81dd-56c869d9004b" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.008546 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cc5906e-5b80-4a15-81dd-56c869d9004b" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.009267 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.012939 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.013058 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.013592 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.013884 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.020781 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl"] Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.099856 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb4eb944-023b-442d-aa59-1b59541767e1-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl\" (UID: \"bb4eb944-023b-442d-aa59-1b59541767e1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.099924 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77dwf\" (UniqueName: \"kubernetes.io/projected/bb4eb944-023b-442d-aa59-1b59541767e1-kube-api-access-77dwf\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl\" (UID: \"bb4eb944-023b-442d-aa59-1b59541767e1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.099986 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bb4eb944-023b-442d-aa59-1b59541767e1-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl\" (UID: \"bb4eb944-023b-442d-aa59-1b59541767e1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.201248 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb4eb944-023b-442d-aa59-1b59541767e1-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl\" (UID: \"bb4eb944-023b-442d-aa59-1b59541767e1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.201418 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77dwf\" (UniqueName: \"kubernetes.io/projected/bb4eb944-023b-442d-aa59-1b59541767e1-kube-api-access-77dwf\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl\" (UID: \"bb4eb944-023b-442d-aa59-1b59541767e1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.201497 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/bb4eb944-023b-442d-aa59-1b59541767e1-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl\" (UID: \"bb4eb944-023b-442d-aa59-1b59541767e1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.209078 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bb4eb944-023b-442d-aa59-1b59541767e1-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl\" (UID: \"bb4eb944-023b-442d-aa59-1b59541767e1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.211558 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb4eb944-023b-442d-aa59-1b59541767e1-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl\" (UID: \"bb4eb944-023b-442d-aa59-1b59541767e1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.218523 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77dwf\" (UniqueName: \"kubernetes.io/projected/bb4eb944-023b-442d-aa59-1b59541767e1-kube-api-access-77dwf\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl\" (UID: \"bb4eb944-023b-442d-aa59-1b59541767e1\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.324289 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.906310 4814 generic.go:334] "Generic (PLEG): container finished" podID="4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" containerID="b9b688f7884e5fc97e4cde948dbce32a53084bb9d4b21107098c6c0cba566574" exitCode=0 Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.906362 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4pdg" event={"ID":"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b","Type":"ContainerDied","Data":"b9b688f7884e5fc97e4cde948dbce32a53084bb9d4b21107098c6c0cba566574"} Jan 22 05:49:49 crc kubenswrapper[4814]: I0122 05:49:49.993997 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl"] Jan 22 05:49:50 crc kubenswrapper[4814]: I0122 05:49:50.916520 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" event={"ID":"bb4eb944-023b-442d-aa59-1b59541767e1","Type":"ContainerStarted","Data":"cfa062816e638a24140ef98634147d1f43ef5690e11a82dea7ef802f0fedc4e0"} Jan 22 05:49:50 crc kubenswrapper[4814]: I0122 05:49:50.916993 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" event={"ID":"bb4eb944-023b-442d-aa59-1b59541767e1","Type":"ContainerStarted","Data":"322a81e524b207901030dcf0774eb55e54b723499fa5cfdf1cd62f399194fc05"} Jan 22 05:49:50 crc kubenswrapper[4814]: I0122 05:49:50.920950 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4pdg" 
event={"ID":"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b","Type":"ContainerStarted","Data":"2681106af2fd22382e9310abf9ec0134023efbc9c1cbc67e370e68e635ca1195"} Jan 22 05:49:50 crc kubenswrapper[4814]: I0122 05:49:50.935137 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" podStartSLOduration=2.520676748 podStartE2EDuration="2.935121528s" podCreationTimestamp="2026-01-22 05:49:48 +0000 UTC" firstStartedPulling="2026-01-22 05:49:50.043820069 +0000 UTC m=+1876.127308284" lastFinishedPulling="2026-01-22 05:49:50.458264849 +0000 UTC m=+1876.541753064" observedRunningTime="2026-01-22 05:49:50.929321188 +0000 UTC m=+1877.012809403" watchObservedRunningTime="2026-01-22 05:49:50.935121528 +0000 UTC m=+1877.018609743" Jan 22 05:49:50 crc kubenswrapper[4814]: I0122 05:49:50.958613 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-s4pdg" podStartSLOduration=2.461933789 podStartE2EDuration="4.958597619s" podCreationTimestamp="2026-01-22 05:49:46 +0000 UTC" firstStartedPulling="2026-01-22 05:49:47.882003314 +0000 UTC m=+1873.965491529" lastFinishedPulling="2026-01-22 05:49:50.378667104 +0000 UTC m=+1876.462155359" observedRunningTime="2026-01-22 05:49:50.953230342 +0000 UTC m=+1877.036718557" watchObservedRunningTime="2026-01-22 05:49:50.958597619 +0000 UTC m=+1877.042085834" Jan 22 05:49:57 crc kubenswrapper[4814]: I0122 05:49:57.165669 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:57 crc kubenswrapper[4814]: I0122 05:49:57.165941 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:57 crc kubenswrapper[4814]: I0122 05:49:57.228782 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:58 crc kubenswrapper[4814]: I0122 05:49:58.061294 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:49:58 crc kubenswrapper[4814]: I0122 05:49:58.133250 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s4pdg"] Jan 22 05:49:59 crc kubenswrapper[4814]: I0122 05:49:59.344313 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683" Jan 22 05:50:00 crc kubenswrapper[4814]: I0122 05:50:00.004145 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-s4pdg" podUID="4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" containerName="registry-server" containerID="cri-o://2681106af2fd22382e9310abf9ec0134023efbc9c1cbc67e370e68e635ca1195" gracePeriod=2 Jan 22 05:50:00 crc kubenswrapper[4814]: I0122 05:50:00.004558 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"eba34c6458f41fc6c68b900130343d2fa0aa66d8953003d31615c0f8d2def5cd"} Jan 22 05:50:00 crc kubenswrapper[4814]: I0122 05:50:00.520671 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:50:00 crc kubenswrapper[4814]: I0122 05:50:00.645654 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-utilities\") pod \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\" (UID: \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\") " Jan 22 05:50:00 crc kubenswrapper[4814]: I0122 05:50:00.646076 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gk768\" (UniqueName: \"kubernetes.io/projected/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-kube-api-access-gk768\") pod \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\" (UID: \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\") " Jan 22 05:50:00 crc kubenswrapper[4814]: I0122 05:50:00.646151 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-catalog-content\") pod \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\" (UID: \"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b\") " Jan 22 05:50:00 crc kubenswrapper[4814]: I0122 05:50:00.646200 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-utilities" (OuterVolumeSpecName: "utilities") pod "4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" (UID: "4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:50:00 crc kubenswrapper[4814]: I0122 05:50:00.646540 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:00 crc kubenswrapper[4814]: I0122 05:50:00.651423 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-kube-api-access-gk768" (OuterVolumeSpecName: "kube-api-access-gk768") pod "4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" (UID: "4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b"). InnerVolumeSpecName "kube-api-access-gk768". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:50:00 crc kubenswrapper[4814]: I0122 05:50:00.669669 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" (UID: "4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:50:00 crc kubenswrapper[4814]: I0122 05:50:00.747741 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gk768\" (UniqueName: \"kubernetes.io/projected/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-kube-api-access-gk768\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:00 crc kubenswrapper[4814]: I0122 05:50:00.747788 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.016301 4814 generic.go:334] "Generic (PLEG): container finished" podID="4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" containerID="2681106af2fd22382e9310abf9ec0134023efbc9c1cbc67e370e68e635ca1195" exitCode=0 Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.016365 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s4pdg" Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.016399 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4pdg" event={"ID":"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b","Type":"ContainerDied","Data":"2681106af2fd22382e9310abf9ec0134023efbc9c1cbc67e370e68e635ca1195"} Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.016715 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s4pdg" event={"ID":"4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b","Type":"ContainerDied","Data":"538bc9797a0bf61e94008c384e9da2ce013dab544c8903716ca15780a81e19a9"} Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.016750 4814 scope.go:117] "RemoveContainer" containerID="2681106af2fd22382e9310abf9ec0134023efbc9c1cbc67e370e68e635ca1195" Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.035885 4814 scope.go:117] "RemoveContainer" containerID="b9b688f7884e5fc97e4cde948dbce32a53084bb9d4b21107098c6c0cba566574" Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.054736 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s4pdg"] Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.061884 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-s4pdg"] Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.066995 4814 scope.go:117] "RemoveContainer" containerID="9dc785f5ac587fb77fd1b7248b074fd1bbf9d3c928b0ad129afcc110df539014" Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.115650 4814 scope.go:117] "RemoveContainer" containerID="2681106af2fd22382e9310abf9ec0134023efbc9c1cbc67e370e68e635ca1195" Jan 22 05:50:01 crc kubenswrapper[4814]: E0122 05:50:01.116155 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2681106af2fd22382e9310abf9ec0134023efbc9c1cbc67e370e68e635ca1195\": container with ID starting with 2681106af2fd22382e9310abf9ec0134023efbc9c1cbc67e370e68e635ca1195 not found: ID does not exist" containerID="2681106af2fd22382e9310abf9ec0134023efbc9c1cbc67e370e68e635ca1195" Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.116185 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2681106af2fd22382e9310abf9ec0134023efbc9c1cbc67e370e68e635ca1195"} err="failed to get container status 
\"2681106af2fd22382e9310abf9ec0134023efbc9c1cbc67e370e68e635ca1195\": rpc error: code = NotFound desc = could not find container \"2681106af2fd22382e9310abf9ec0134023efbc9c1cbc67e370e68e635ca1195\": container with ID starting with 2681106af2fd22382e9310abf9ec0134023efbc9c1cbc67e370e68e635ca1195 not found: ID does not exist" Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.116205 4814 scope.go:117] "RemoveContainer" containerID="b9b688f7884e5fc97e4cde948dbce32a53084bb9d4b21107098c6c0cba566574" Jan 22 05:50:01 crc kubenswrapper[4814]: E0122 05:50:01.116456 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b9b688f7884e5fc97e4cde948dbce32a53084bb9d4b21107098c6c0cba566574\": container with ID starting with b9b688f7884e5fc97e4cde948dbce32a53084bb9d4b21107098c6c0cba566574 not found: ID does not exist" containerID="b9b688f7884e5fc97e4cde948dbce32a53084bb9d4b21107098c6c0cba566574" Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.116485 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b9b688f7884e5fc97e4cde948dbce32a53084bb9d4b21107098c6c0cba566574"} err="failed to get container status \"b9b688f7884e5fc97e4cde948dbce32a53084bb9d4b21107098c6c0cba566574\": rpc error: code = NotFound desc = could not find container \"b9b688f7884e5fc97e4cde948dbce32a53084bb9d4b21107098c6c0cba566574\": container with ID starting with b9b688f7884e5fc97e4cde948dbce32a53084bb9d4b21107098c6c0cba566574 not found: ID does not exist" Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.116499 4814 scope.go:117] "RemoveContainer" containerID="9dc785f5ac587fb77fd1b7248b074fd1bbf9d3c928b0ad129afcc110df539014" Jan 22 05:50:01 crc kubenswrapper[4814]: E0122 05:50:01.116903 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9dc785f5ac587fb77fd1b7248b074fd1bbf9d3c928b0ad129afcc110df539014\": container with ID starting with 9dc785f5ac587fb77fd1b7248b074fd1bbf9d3c928b0ad129afcc110df539014 not found: ID does not exist" containerID="9dc785f5ac587fb77fd1b7248b074fd1bbf9d3c928b0ad129afcc110df539014" Jan 22 05:50:01 crc kubenswrapper[4814]: I0122 05:50:01.116923 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9dc785f5ac587fb77fd1b7248b074fd1bbf9d3c928b0ad129afcc110df539014"} err="failed to get container status \"9dc785f5ac587fb77fd1b7248b074fd1bbf9d3c928b0ad129afcc110df539014\": rpc error: code = NotFound desc = could not find container \"9dc785f5ac587fb77fd1b7248b074fd1bbf9d3c928b0ad129afcc110df539014\": container with ID starting with 9dc785f5ac587fb77fd1b7248b074fd1bbf9d3c928b0ad129afcc110df539014 not found: ID does not exist" Jan 22 05:50:02 crc kubenswrapper[4814]: I0122 05:50:02.355419 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" path="/var/lib/kubelet/pods/4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b/volumes" Jan 22 05:50:43 crc kubenswrapper[4814]: I0122 05:50:43.188211 4814 scope.go:117] "RemoveContainer" containerID="27a8b861a7885225d7763ea24046729800b4d5fd0d87211e36507fc8c55a71d9" Jan 22 05:50:53 crc kubenswrapper[4814]: E0122 05:50:53.751216 4814 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb4eb944_023b_442d_aa59_1b59541767e1.slice/crio-conmon-cfa062816e638a24140ef98634147d1f43ef5690e11a82dea7ef802f0fedc4e0.scope\": RecentStats: unable to find data in memory cache]" Jan 22 05:50:54 crc kubenswrapper[4814]: I0122 05:50:54.571900 4814 generic.go:334] "Generic (PLEG): container finished" podID="bb4eb944-023b-442d-aa59-1b59541767e1" containerID="cfa062816e638a24140ef98634147d1f43ef5690e11a82dea7ef802f0fedc4e0" exitCode=0 Jan 22 05:50:54 crc kubenswrapper[4814]: I0122 05:50:54.572397 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" event={"ID":"bb4eb944-023b-442d-aa59-1b59541767e1","Type":"ContainerDied","Data":"cfa062816e638a24140ef98634147d1f43ef5690e11a82dea7ef802f0fedc4e0"} Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.033684 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.223841 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77dwf\" (UniqueName: \"kubernetes.io/projected/bb4eb944-023b-442d-aa59-1b59541767e1-kube-api-access-77dwf\") pod \"bb4eb944-023b-442d-aa59-1b59541767e1\" (UID: \"bb4eb944-023b-442d-aa59-1b59541767e1\") " Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.223927 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bb4eb944-023b-442d-aa59-1b59541767e1-ssh-key-openstack-edpm-ipam\") pod \"bb4eb944-023b-442d-aa59-1b59541767e1\" (UID: \"bb4eb944-023b-442d-aa59-1b59541767e1\") " Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.224083 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb4eb944-023b-442d-aa59-1b59541767e1-inventory\") pod \"bb4eb944-023b-442d-aa59-1b59541767e1\" (UID: \"bb4eb944-023b-442d-aa59-1b59541767e1\") " Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.236963 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb4eb944-023b-442d-aa59-1b59541767e1-kube-api-access-77dwf" (OuterVolumeSpecName: "kube-api-access-77dwf") pod "bb4eb944-023b-442d-aa59-1b59541767e1" (UID: "bb4eb944-023b-442d-aa59-1b59541767e1"). InnerVolumeSpecName "kube-api-access-77dwf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.251191 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb4eb944-023b-442d-aa59-1b59541767e1-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "bb4eb944-023b-442d-aa59-1b59541767e1" (UID: "bb4eb944-023b-442d-aa59-1b59541767e1"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.252295 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb4eb944-023b-442d-aa59-1b59541767e1-inventory" (OuterVolumeSpecName: "inventory") pod "bb4eb944-023b-442d-aa59-1b59541767e1" (UID: "bb4eb944-023b-442d-aa59-1b59541767e1"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.326750 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77dwf\" (UniqueName: \"kubernetes.io/projected/bb4eb944-023b-442d-aa59-1b59541767e1-kube-api-access-77dwf\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.326781 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bb4eb944-023b-442d-aa59-1b59541767e1-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.326791 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb4eb944-023b-442d-aa59-1b59541767e1-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.592370 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" event={"ID":"bb4eb944-023b-442d-aa59-1b59541767e1","Type":"ContainerDied","Data":"322a81e524b207901030dcf0774eb55e54b723499fa5cfdf1cd62f399194fc05"} Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.592734 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="322a81e524b207901030dcf0774eb55e54b723499fa5cfdf1cd62f399194fc05" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.592454 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-sqjfl" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.700345 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-rm2l9"] Jan 22 05:50:56 crc kubenswrapper[4814]: E0122 05:50:56.701215 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb4eb944-023b-442d-aa59-1b59541767e1" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.701401 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb4eb944-023b-442d-aa59-1b59541767e1" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 22 05:50:56 crc kubenswrapper[4814]: E0122 05:50:56.701535 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" containerName="registry-server" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.701673 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" containerName="registry-server" Jan 22 05:50:56 crc kubenswrapper[4814]: E0122 05:50:56.701845 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" containerName="extract-utilities" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.701991 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" containerName="extract-utilities" Jan 22 05:50:56 crc kubenswrapper[4814]: E0122 05:50:56.702127 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" containerName="extract-content" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.702238 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" containerName="extract-content" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.702708 4814 
memory_manager.go:354] "RemoveStaleState removing state" podUID="bb4eb944-023b-442d-aa59-1b59541767e1" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.702855 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bb45f7c-dcf5-4a7f-bb20-bb2335f3114b" containerName="registry-server" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.703917 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.708366 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.708920 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.709858 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.715990 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.722144 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-rm2l9"] Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.852562 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-rm2l9\" (UID: \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\") " pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.852663 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4kgh\" (UniqueName: \"kubernetes.io/projected/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-kube-api-access-w4kgh\") pod \"ssh-known-hosts-edpm-deployment-rm2l9\" (UID: \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\") " pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.853749 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-rm2l9\" (UID: \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\") " pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.956328 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-rm2l9\" (UID: \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\") " pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.956484 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-rm2l9\" (UID: \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\") " 
pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.956529 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4kgh\" (UniqueName: \"kubernetes.io/projected/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-kube-api-access-w4kgh\") pod \"ssh-known-hosts-edpm-deployment-rm2l9\" (UID: \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\") " pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.966166 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-rm2l9\" (UID: \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\") " pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.966189 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-rm2l9\" (UID: \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\") " pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" Jan 22 05:50:56 crc kubenswrapper[4814]: I0122 05:50:56.979579 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4kgh\" (UniqueName: \"kubernetes.io/projected/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-kube-api-access-w4kgh\") pod \"ssh-known-hosts-edpm-deployment-rm2l9\" (UID: \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\") " pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" Jan 22 05:50:57 crc kubenswrapper[4814]: I0122 05:50:57.057830 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" Jan 22 05:50:57 crc kubenswrapper[4814]: I0122 05:50:57.671975 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-rm2l9"] Jan 22 05:50:58 crc kubenswrapper[4814]: I0122 05:50:58.622879 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" event={"ID":"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6","Type":"ContainerStarted","Data":"61690f1b9622bcfc4c53d52816a969081794503f3698d8776c8c84150083c6a1"} Jan 22 05:50:58 crc kubenswrapper[4814]: I0122 05:50:58.624151 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" event={"ID":"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6","Type":"ContainerStarted","Data":"ccf0055203edbdc390e20687ff628dfc500fe770736c47f04d23e7711ca1293c"} Jan 22 05:50:58 crc kubenswrapper[4814]: I0122 05:50:58.646677 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" podStartSLOduration=2.230324944 podStartE2EDuration="2.64664301s" podCreationTimestamp="2026-01-22 05:50:56 +0000 UTC" firstStartedPulling="2026-01-22 05:50:57.685784399 +0000 UTC m=+1943.769272614" lastFinishedPulling="2026-01-22 05:50:58.102102435 +0000 UTC m=+1944.185590680" observedRunningTime="2026-01-22 05:50:58.636789403 +0000 UTC m=+1944.720277618" watchObservedRunningTime="2026-01-22 05:50:58.64664301 +0000 UTC m=+1944.730131225" Jan 22 05:51:06 crc kubenswrapper[4814]: I0122 05:51:06.703497 4814 generic.go:334] "Generic (PLEG): container finished" podID="bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6" containerID="61690f1b9622bcfc4c53d52816a969081794503f3698d8776c8c84150083c6a1" exitCode=0 Jan 22 05:51:06 crc kubenswrapper[4814]: I0122 05:51:06.703612 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" event={"ID":"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6","Type":"ContainerDied","Data":"61690f1b9622bcfc4c53d52816a969081794503f3698d8776c8c84150083c6a1"} Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.190887 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.300419 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4kgh\" (UniqueName: \"kubernetes.io/projected/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-kube-api-access-w4kgh\") pod \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\" (UID: \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\") " Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.300518 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-inventory-0\") pod \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\" (UID: \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\") " Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.300550 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-ssh-key-openstack-edpm-ipam\") pod \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\" (UID: \"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6\") " Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.306995 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-kube-api-access-w4kgh" (OuterVolumeSpecName: "kube-api-access-w4kgh") pod "bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6" (UID: "bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6"). InnerVolumeSpecName "kube-api-access-w4kgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.334455 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6" (UID: "bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.366805 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6" (UID: "bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.403247 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4kgh\" (UniqueName: \"kubernetes.io/projected/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-kube-api-access-w4kgh\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.403294 4814 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-inventory-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.403316 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.729210 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" event={"ID":"bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6","Type":"ContainerDied","Data":"ccf0055203edbdc390e20687ff628dfc500fe770736c47f04d23e7711ca1293c"} Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.729562 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ccf0055203edbdc390e20687ff628dfc500fe770736c47f04d23e7711ca1293c" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.729312 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-rm2l9" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.831788 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs"] Jan 22 05:51:08 crc kubenswrapper[4814]: E0122 05:51:08.832315 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6" containerName="ssh-known-hosts-edpm-deployment" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.832338 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6" containerName="ssh-known-hosts-edpm-deployment" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.832593 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdaedc16-3fd9-43b3-8cdd-9ceed14b51a6" containerName="ssh-known-hosts-edpm-deployment" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.833589 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.835611 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.836036 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.836169 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.836205 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.867110 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs"] Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.913048 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/65078b0d-df77-4574-8eaa-010e014a0f62-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xg2zs\" (UID: \"65078b0d-df77-4574-8eaa-010e014a0f62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.913090 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/65078b0d-df77-4574-8eaa-010e014a0f62-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xg2zs\" (UID: \"65078b0d-df77-4574-8eaa-010e014a0f62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" Jan 22 05:51:08 crc kubenswrapper[4814]: I0122 05:51:08.913162 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-462sh\" (UniqueName: \"kubernetes.io/projected/65078b0d-df77-4574-8eaa-010e014a0f62-kube-api-access-462sh\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xg2zs\" (UID: \"65078b0d-df77-4574-8eaa-010e014a0f62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" Jan 22 05:51:09 crc kubenswrapper[4814]: I0122 05:51:09.014864 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/65078b0d-df77-4574-8eaa-010e014a0f62-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xg2zs\" (UID: \"65078b0d-df77-4574-8eaa-010e014a0f62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" Jan 22 05:51:09 crc kubenswrapper[4814]: I0122 05:51:09.014945 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/65078b0d-df77-4574-8eaa-010e014a0f62-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xg2zs\" (UID: \"65078b0d-df77-4574-8eaa-010e014a0f62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" Jan 22 05:51:09 crc kubenswrapper[4814]: I0122 05:51:09.015091 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-462sh\" (UniqueName: \"kubernetes.io/projected/65078b0d-df77-4574-8eaa-010e014a0f62-kube-api-access-462sh\") pod 
\"run-os-edpm-deployment-openstack-edpm-ipam-xg2zs\" (UID: \"65078b0d-df77-4574-8eaa-010e014a0f62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" Jan 22 05:51:09 crc kubenswrapper[4814]: I0122 05:51:09.018776 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/65078b0d-df77-4574-8eaa-010e014a0f62-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xg2zs\" (UID: \"65078b0d-df77-4574-8eaa-010e014a0f62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" Jan 22 05:51:09 crc kubenswrapper[4814]: I0122 05:51:09.022772 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/65078b0d-df77-4574-8eaa-010e014a0f62-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xg2zs\" (UID: \"65078b0d-df77-4574-8eaa-010e014a0f62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" Jan 22 05:51:09 crc kubenswrapper[4814]: I0122 05:51:09.034468 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-462sh\" (UniqueName: \"kubernetes.io/projected/65078b0d-df77-4574-8eaa-010e014a0f62-kube-api-access-462sh\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-xg2zs\" (UID: \"65078b0d-df77-4574-8eaa-010e014a0f62\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" Jan 22 05:51:09 crc kubenswrapper[4814]: I0122 05:51:09.157497 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" Jan 22 05:51:09 crc kubenswrapper[4814]: I0122 05:51:09.718188 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs"] Jan 22 05:51:09 crc kubenswrapper[4814]: I0122 05:51:09.742085 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" event={"ID":"65078b0d-df77-4574-8eaa-010e014a0f62","Type":"ContainerStarted","Data":"fdf84b120423d1c9ee1cc10c4a80fdc469ad6e09da453a01e679d9cbae232b5f"} Jan 22 05:51:10 crc kubenswrapper[4814]: I0122 05:51:10.754684 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" event={"ID":"65078b0d-df77-4574-8eaa-010e014a0f62","Type":"ContainerStarted","Data":"0f43a3abe81d613929bcbb115dbbcdc78108eed0af3b682fd47a2b782895ff77"} Jan 22 05:51:10 crc kubenswrapper[4814]: I0122 05:51:10.777825 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" podStartSLOduration=2.269672625 podStartE2EDuration="2.777796286s" podCreationTimestamp="2026-01-22 05:51:08 +0000 UTC" firstStartedPulling="2026-01-22 05:51:09.734671708 +0000 UTC m=+1955.818159923" lastFinishedPulling="2026-01-22 05:51:10.242795329 +0000 UTC m=+1956.326283584" observedRunningTime="2026-01-22 05:51:10.7740495 +0000 UTC m=+1956.857537715" watchObservedRunningTime="2026-01-22 05:51:10.777796286 +0000 UTC m=+1956.861284541" Jan 22 05:51:19 crc kubenswrapper[4814]: I0122 05:51:19.836716 4814 generic.go:334] "Generic (PLEG): container finished" podID="65078b0d-df77-4574-8eaa-010e014a0f62" containerID="0f43a3abe81d613929bcbb115dbbcdc78108eed0af3b682fd47a2b782895ff77" exitCode=0 Jan 22 05:51:19 crc kubenswrapper[4814]: I0122 05:51:19.836791 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" event={"ID":"65078b0d-df77-4574-8eaa-010e014a0f62","Type":"ContainerDied","Data":"0f43a3abe81d613929bcbb115dbbcdc78108eed0af3b682fd47a2b782895ff77"} Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.350148 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.461010 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/65078b0d-df77-4574-8eaa-010e014a0f62-inventory\") pod \"65078b0d-df77-4574-8eaa-010e014a0f62\" (UID: \"65078b0d-df77-4574-8eaa-010e014a0f62\") " Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.461213 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-462sh\" (UniqueName: \"kubernetes.io/projected/65078b0d-df77-4574-8eaa-010e014a0f62-kube-api-access-462sh\") pod \"65078b0d-df77-4574-8eaa-010e014a0f62\" (UID: \"65078b0d-df77-4574-8eaa-010e014a0f62\") " Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.461273 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/65078b0d-df77-4574-8eaa-010e014a0f62-ssh-key-openstack-edpm-ipam\") pod \"65078b0d-df77-4574-8eaa-010e014a0f62\" (UID: \"65078b0d-df77-4574-8eaa-010e014a0f62\") " Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.479156 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65078b0d-df77-4574-8eaa-010e014a0f62-kube-api-access-462sh" (OuterVolumeSpecName: "kube-api-access-462sh") pod "65078b0d-df77-4574-8eaa-010e014a0f62" (UID: "65078b0d-df77-4574-8eaa-010e014a0f62"). InnerVolumeSpecName "kube-api-access-462sh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.487865 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65078b0d-df77-4574-8eaa-010e014a0f62-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "65078b0d-df77-4574-8eaa-010e014a0f62" (UID: "65078b0d-df77-4574-8eaa-010e014a0f62"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.517943 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65078b0d-df77-4574-8eaa-010e014a0f62-inventory" (OuterVolumeSpecName: "inventory") pod "65078b0d-df77-4574-8eaa-010e014a0f62" (UID: "65078b0d-df77-4574-8eaa-010e014a0f62"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.563052 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-462sh\" (UniqueName: \"kubernetes.io/projected/65078b0d-df77-4574-8eaa-010e014a0f62-kube-api-access-462sh\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.563086 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/65078b0d-df77-4574-8eaa-010e014a0f62-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.563101 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/65078b0d-df77-4574-8eaa-010e014a0f62-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.876854 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" event={"ID":"65078b0d-df77-4574-8eaa-010e014a0f62","Type":"ContainerDied","Data":"fdf84b120423d1c9ee1cc10c4a80fdc469ad6e09da453a01e679d9cbae232b5f"} Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.876904 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fdf84b120423d1c9ee1cc10c4a80fdc469ad6e09da453a01e679d9cbae232b5f" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.876928 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-xg2zs" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.953106 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw"] Jan 22 05:51:21 crc kubenswrapper[4814]: E0122 05:51:21.953963 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65078b0d-df77-4574-8eaa-010e014a0f62" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.954636 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="65078b0d-df77-4574-8eaa-010e014a0f62" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.954895 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="65078b0d-df77-4574-8eaa-010e014a0f62" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.955534 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.964049 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw"] Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.966143 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.966345 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.966440 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:51:21 crc kubenswrapper[4814]: I0122 05:51:21.966439 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:51:22 crc kubenswrapper[4814]: I0122 05:51:22.074420 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07641943-7225-43be-a50b-b576614b9ff9-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw\" (UID: \"07641943-7225-43be-a50b-b576614b9ff9\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" Jan 22 05:51:22 crc kubenswrapper[4814]: I0122 05:51:22.074508 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07641943-7225-43be-a50b-b576614b9ff9-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw\" (UID: \"07641943-7225-43be-a50b-b576614b9ff9\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" Jan 22 05:51:22 crc kubenswrapper[4814]: I0122 05:51:22.074617 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzx6r\" (UniqueName: \"kubernetes.io/projected/07641943-7225-43be-a50b-b576614b9ff9-kube-api-access-dzx6r\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw\" (UID: \"07641943-7225-43be-a50b-b576614b9ff9\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" Jan 22 05:51:22 crc kubenswrapper[4814]: I0122 05:51:22.176759 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzx6r\" (UniqueName: \"kubernetes.io/projected/07641943-7225-43be-a50b-b576614b9ff9-kube-api-access-dzx6r\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw\" (UID: \"07641943-7225-43be-a50b-b576614b9ff9\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" Jan 22 05:51:22 crc kubenswrapper[4814]: I0122 05:51:22.176860 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07641943-7225-43be-a50b-b576614b9ff9-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw\" (UID: \"07641943-7225-43be-a50b-b576614b9ff9\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" Jan 22 05:51:22 crc kubenswrapper[4814]: I0122 05:51:22.176886 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07641943-7225-43be-a50b-b576614b9ff9-ssh-key-openstack-edpm-ipam\") pod 
\"reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw\" (UID: \"07641943-7225-43be-a50b-b576614b9ff9\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" Jan 22 05:51:22 crc kubenswrapper[4814]: I0122 05:51:22.181981 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07641943-7225-43be-a50b-b576614b9ff9-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw\" (UID: \"07641943-7225-43be-a50b-b576614b9ff9\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" Jan 22 05:51:22 crc kubenswrapper[4814]: I0122 05:51:22.182059 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07641943-7225-43be-a50b-b576614b9ff9-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw\" (UID: \"07641943-7225-43be-a50b-b576614b9ff9\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" Jan 22 05:51:22 crc kubenswrapper[4814]: I0122 05:51:22.200395 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzx6r\" (UniqueName: \"kubernetes.io/projected/07641943-7225-43be-a50b-b576614b9ff9-kube-api-access-dzx6r\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw\" (UID: \"07641943-7225-43be-a50b-b576614b9ff9\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" Jan 22 05:51:22 crc kubenswrapper[4814]: I0122 05:51:22.291985 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" Jan 22 05:51:22 crc kubenswrapper[4814]: I0122 05:51:22.900865 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw"] Jan 22 05:51:23 crc kubenswrapper[4814]: I0122 05:51:23.898700 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" event={"ID":"07641943-7225-43be-a50b-b576614b9ff9","Type":"ContainerStarted","Data":"2927686574e42d0e25c1fad39c4272936bacade1c697067bbc4c9ae96b9152e6"} Jan 22 05:51:23 crc kubenswrapper[4814]: I0122 05:51:23.899128 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" event={"ID":"07641943-7225-43be-a50b-b576614b9ff9","Type":"ContainerStarted","Data":"e09471e70619eb4226bcf81828291a9ef99e204f6898cb661b7626a5b954f93f"} Jan 22 05:51:23 crc kubenswrapper[4814]: I0122 05:51:23.925386 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" podStartSLOduration=2.520578355 podStartE2EDuration="2.925367182s" podCreationTimestamp="2026-01-22 05:51:21 +0000 UTC" firstStartedPulling="2026-01-22 05:51:22.912909998 +0000 UTC m=+1968.996398233" lastFinishedPulling="2026-01-22 05:51:23.317698845 +0000 UTC m=+1969.401187060" observedRunningTime="2026-01-22 05:51:23.913788212 +0000 UTC m=+1969.997276427" watchObservedRunningTime="2026-01-22 05:51:23.925367182 +0000 UTC m=+1970.008855397" Jan 22 05:51:33 crc kubenswrapper[4814]: I0122 05:51:33.989252 4814 generic.go:334] "Generic (PLEG): container finished" podID="07641943-7225-43be-a50b-b576614b9ff9" containerID="2927686574e42d0e25c1fad39c4272936bacade1c697067bbc4c9ae96b9152e6" exitCode=0 Jan 22 05:51:33 crc kubenswrapper[4814]: I0122 05:51:33.989303 4814 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" event={"ID":"07641943-7225-43be-a50b-b576614b9ff9","Type":"ContainerDied","Data":"2927686574e42d0e25c1fad39c4272936bacade1c697067bbc4c9ae96b9152e6"} Jan 22 05:51:35 crc kubenswrapper[4814]: I0122 05:51:35.616270 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" Jan 22 05:51:35 crc kubenswrapper[4814]: I0122 05:51:35.675895 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07641943-7225-43be-a50b-b576614b9ff9-ssh-key-openstack-edpm-ipam\") pod \"07641943-7225-43be-a50b-b576614b9ff9\" (UID: \"07641943-7225-43be-a50b-b576614b9ff9\") " Jan 22 05:51:35 crc kubenswrapper[4814]: I0122 05:51:35.676370 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzx6r\" (UniqueName: \"kubernetes.io/projected/07641943-7225-43be-a50b-b576614b9ff9-kube-api-access-dzx6r\") pod \"07641943-7225-43be-a50b-b576614b9ff9\" (UID: \"07641943-7225-43be-a50b-b576614b9ff9\") " Jan 22 05:51:35 crc kubenswrapper[4814]: I0122 05:51:35.676422 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07641943-7225-43be-a50b-b576614b9ff9-inventory\") pod \"07641943-7225-43be-a50b-b576614b9ff9\" (UID: \"07641943-7225-43be-a50b-b576614b9ff9\") " Jan 22 05:51:35 crc kubenswrapper[4814]: I0122 05:51:35.681415 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07641943-7225-43be-a50b-b576614b9ff9-kube-api-access-dzx6r" (OuterVolumeSpecName: "kube-api-access-dzx6r") pod "07641943-7225-43be-a50b-b576614b9ff9" (UID: "07641943-7225-43be-a50b-b576614b9ff9"). InnerVolumeSpecName "kube-api-access-dzx6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:51:35 crc kubenswrapper[4814]: I0122 05:51:35.714951 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07641943-7225-43be-a50b-b576614b9ff9-inventory" (OuterVolumeSpecName: "inventory") pod "07641943-7225-43be-a50b-b576614b9ff9" (UID: "07641943-7225-43be-a50b-b576614b9ff9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:51:35 crc kubenswrapper[4814]: I0122 05:51:35.731224 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07641943-7225-43be-a50b-b576614b9ff9-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "07641943-7225-43be-a50b-b576614b9ff9" (UID: "07641943-7225-43be-a50b-b576614b9ff9"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:51:35 crc kubenswrapper[4814]: I0122 05:51:35.782134 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzx6r\" (UniqueName: \"kubernetes.io/projected/07641943-7225-43be-a50b-b576614b9ff9-kube-api-access-dzx6r\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:35 crc kubenswrapper[4814]: I0122 05:51:35.782210 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07641943-7225-43be-a50b-b576614b9ff9-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:35 crc kubenswrapper[4814]: I0122 05:51:35.782239 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07641943-7225-43be-a50b-b576614b9ff9-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.014429 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" event={"ID":"07641943-7225-43be-a50b-b576614b9ff9","Type":"ContainerDied","Data":"e09471e70619eb4226bcf81828291a9ef99e204f6898cb661b7626a5b954f93f"} Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.014475 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e09471e70619eb4226bcf81828291a9ef99e204f6898cb661b7626a5b954f93f" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.014517 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-z8ptw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.125229 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw"] Jan 22 05:51:36 crc kubenswrapper[4814]: E0122 05:51:36.125595 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07641943-7225-43be-a50b-b576614b9ff9" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.125611 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="07641943-7225-43be-a50b-b576614b9ff9" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.125830 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="07641943-7225-43be-a50b-b576614b9ff9" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.126421 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.132915 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.133168 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.133270 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.133412 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.133538 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.133658 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.133783 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.133907 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.142679 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw"] Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.189545 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.189592 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.189649 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.189672 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.189853 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.189941 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6xwn\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-kube-api-access-x6xwn\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.189984 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.190053 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.190112 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.190205 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.190259 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-nova-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.190394 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.190501 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.190553 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.291829 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.291918 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.291959 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.291988 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.292040 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.292084 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.292108 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.292164 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.292193 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.292227 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.292253 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.292303 4814 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.292337 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6xwn\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-kube-api-access-x6xwn\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.292361 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.295830 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.298569 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.308759 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.310802 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.314528 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.316958 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.318045 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.320439 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.320798 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.321130 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.321293 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.321688 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.323154 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" 
(UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.332190 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6xwn\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-kube-api-access-x6xwn\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-plccw\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:36 crc kubenswrapper[4814]: I0122 05:51:36.461808 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:51:37 crc kubenswrapper[4814]: I0122 05:51:37.047787 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw"] Jan 22 05:51:37 crc kubenswrapper[4814]: W0122 05:51:37.049820 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod75bd4e5f_1c5c_4ccd_9b06_be8c501d7d4c.slice/crio-960b6f255a324a2191e40f996fd63923804f9eb6eab75fd2c15330301afc4fe9 WatchSource:0}: Error finding container 960b6f255a324a2191e40f996fd63923804f9eb6eab75fd2c15330301afc4fe9: Status 404 returned error can't find the container with id 960b6f255a324a2191e40f996fd63923804f9eb6eab75fd2c15330301afc4fe9 Jan 22 05:51:38 crc kubenswrapper[4814]: I0122 05:51:38.043138 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" event={"ID":"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c","Type":"ContainerStarted","Data":"1c12c06ec609a19b75ffa47ae95c7c23bc203f39e876aa935b0f46b2d37b6eeb"} Jan 22 05:51:38 crc kubenswrapper[4814]: I0122 05:51:38.043747 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" event={"ID":"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c","Type":"ContainerStarted","Data":"960b6f255a324a2191e40f996fd63923804f9eb6eab75fd2c15330301afc4fe9"} Jan 22 05:51:38 crc kubenswrapper[4814]: I0122 05:51:38.075102 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" podStartSLOduration=1.647099462 podStartE2EDuration="2.075074411s" podCreationTimestamp="2026-01-22 05:51:36 +0000 UTC" firstStartedPulling="2026-01-22 05:51:37.05214437 +0000 UTC m=+1983.135632585" lastFinishedPulling="2026-01-22 05:51:37.480119319 +0000 UTC m=+1983.563607534" observedRunningTime="2026-01-22 05:51:38.073387098 +0000 UTC m=+1984.156875393" watchObservedRunningTime="2026-01-22 05:51:38.075074411 +0000 UTC m=+1984.158562656" Jan 22 05:52:19 crc kubenswrapper[4814]: I0122 05:52:19.614756 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:52:19 crc kubenswrapper[4814]: I0122 05:52:19.615383 4814 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:52:22 crc kubenswrapper[4814]: I0122 05:52:22.512720 4814 generic.go:334] "Generic (PLEG): container finished" podID="75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" containerID="1c12c06ec609a19b75ffa47ae95c7c23bc203f39e876aa935b0f46b2d37b6eeb" exitCode=0 Jan 22 05:52:22 crc kubenswrapper[4814]: I0122 05:52:22.512828 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" event={"ID":"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c","Type":"ContainerDied","Data":"1c12c06ec609a19b75ffa47ae95c7c23bc203f39e876aa935b0f46b2d37b6eeb"} Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.040351 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.191877 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-ssh-key-openstack-edpm-ipam\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.193315 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.194790 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-ovn-combined-ca-bundle\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.195259 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.196049 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6xwn\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-kube-api-access-x6xwn\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.202816 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-telemetry-combined-ca-bundle\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.203021 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-nova-combined-ca-bundle\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.200016 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-kube-api-access-x6xwn" (OuterVolumeSpecName: "kube-api-access-x6xwn") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "kube-api-access-x6xwn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.203432 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-ovn-default-certs-0\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.203706 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-repo-setup-combined-ca-bundle\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.203903 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-neutron-metadata-combined-ca-bundle\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.204057 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-libvirt-combined-ca-bundle\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.204242 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-inventory\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.204530 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.204778 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-bootstrap-combined-ca-bundle\") pod \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\" (UID: \"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c\") " Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.205301 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.206132 4814 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.206310 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6xwn\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-kube-api-access-x6xwn\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.206268 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.206345 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.208458 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.210432 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.213567 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.214188 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.214395 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.217852 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.221332 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.231839 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.247766 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.273669 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-inventory" (OuterVolumeSpecName: "inventory") pod "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" (UID: "75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.308404 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.308433 4814 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.308448 4814 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.308462 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.308476 4814 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.308491 4814 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.308503 4814 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.308515 4814 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.308527 4814 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.308539 4814 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.308551 4814 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.308563 4814 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.535695 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" event={"ID":"75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c","Type":"ContainerDied","Data":"960b6f255a324a2191e40f996fd63923804f9eb6eab75fd2c15330301afc4fe9"} Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.535729 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="960b6f255a324a2191e40f996fd63923804f9eb6eab75fd2c15330301afc4fe9" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.535746 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-plccw" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.744606 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq"] Jan 22 05:52:24 crc kubenswrapper[4814]: E0122 05:52:24.745191 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.745221 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.745533 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="75bd4e5f-1c5c-4ccd-9b06-be8c501d7d4c" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.746666 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.754556 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.755076 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.755456 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.755843 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.756086 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.757942 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq"] Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.817368 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.817694 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.817831 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.817957 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ef9f2307-3515-49f5-b666-bfcbd1536de1-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.818147 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqwd6\" (UniqueName: \"kubernetes.io/projected/ef9f2307-3515-49f5-b666-bfcbd1536de1-kube-api-access-nqwd6\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.920089 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.920442 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.920608 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.920799 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ef9f2307-3515-49f5-b666-bfcbd1536de1-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.921061 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqwd6\" (UniqueName: \"kubernetes.io/projected/ef9f2307-3515-49f5-b666-bfcbd1536de1-kube-api-access-nqwd6\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.922029 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ef9f2307-3515-49f5-b666-bfcbd1536de1-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.924535 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.925569 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.926833 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:24 crc kubenswrapper[4814]: I0122 05:52:24.945217 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqwd6\" (UniqueName: \"kubernetes.io/projected/ef9f2307-3515-49f5-b666-bfcbd1536de1-kube-api-access-nqwd6\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-cz4dq\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:25 crc kubenswrapper[4814]: I0122 05:52:25.071058 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:52:25 crc kubenswrapper[4814]: I0122 05:52:25.650221 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq"] Jan 22 05:52:25 crc kubenswrapper[4814]: W0122 05:52:25.657035 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podef9f2307_3515_49f5_b666_bfcbd1536de1.slice/crio-fa4bc33e82988e31006c7da50bc34140378e2b2359ca53e788fe2c60d3b8434b WatchSource:0}: Error finding container fa4bc33e82988e31006c7da50bc34140378e2b2359ca53e788fe2c60d3b8434b: Status 404 returned error can't find the container with id fa4bc33e82988e31006c7da50bc34140378e2b2359ca53e788fe2c60d3b8434b Jan 22 05:52:26 crc kubenswrapper[4814]: I0122 05:52:26.564098 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" event={"ID":"ef9f2307-3515-49f5-b666-bfcbd1536de1","Type":"ContainerStarted","Data":"5f0575e5c016bf9c15e8cf7562538a0a824b426026f3e55d72a31115e33ee794"} Jan 22 05:52:26 crc kubenswrapper[4814]: I0122 05:52:26.564691 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" event={"ID":"ef9f2307-3515-49f5-b666-bfcbd1536de1","Type":"ContainerStarted","Data":"fa4bc33e82988e31006c7da50bc34140378e2b2359ca53e788fe2c60d3b8434b"} Jan 22 05:52:26 crc kubenswrapper[4814]: I0122 05:52:26.601861 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" podStartSLOduration=2.095303699 podStartE2EDuration="2.601839245s" podCreationTimestamp="2026-01-22 05:52:24 +0000 UTC" firstStartedPulling="2026-01-22 05:52:25.661582582 +0000 UTC m=+2031.745070837" lastFinishedPulling="2026-01-22 05:52:26.168118168 +0000 UTC m=+2032.251606383" observedRunningTime="2026-01-22 05:52:26.592570136 +0000 UTC m=+2032.676058371" watchObservedRunningTime="2026-01-22 05:52:26.601839245 +0000 UTC m=+2032.685327460" Jan 22 05:52:49 crc kubenswrapper[4814]: I0122 05:52:49.613786 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:52:49 crc kubenswrapper[4814]: I0122 05:52:49.614479 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.065020 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7xln9"] Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.068103 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.085116 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7xln9"] Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.232268 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6w4jt\" (UniqueName: \"kubernetes.io/projected/d0b962da-0f8b-4b31-9065-09964bb3c242-kube-api-access-6w4jt\") pod \"redhat-operators-7xln9\" (UID: \"d0b962da-0f8b-4b31-9065-09964bb3c242\") " pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.232331 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0b962da-0f8b-4b31-9065-09964bb3c242-utilities\") pod \"redhat-operators-7xln9\" (UID: \"d0b962da-0f8b-4b31-9065-09964bb3c242\") " pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.232459 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0b962da-0f8b-4b31-9065-09964bb3c242-catalog-content\") pod \"redhat-operators-7xln9\" (UID: \"d0b962da-0f8b-4b31-9065-09964bb3c242\") " pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.334206 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0b962da-0f8b-4b31-9065-09964bb3c242-catalog-content\") pod \"redhat-operators-7xln9\" (UID: \"d0b962da-0f8b-4b31-9065-09964bb3c242\") " pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.334404 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6w4jt\" (UniqueName: \"kubernetes.io/projected/d0b962da-0f8b-4b31-9065-09964bb3c242-kube-api-access-6w4jt\") pod \"redhat-operators-7xln9\" (UID: \"d0b962da-0f8b-4b31-9065-09964bb3c242\") " pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.334450 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0b962da-0f8b-4b31-9065-09964bb3c242-utilities\") pod \"redhat-operators-7xln9\" (UID: \"d0b962da-0f8b-4b31-9065-09964bb3c242\") " pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.334673 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0b962da-0f8b-4b31-9065-09964bb3c242-catalog-content\") pod \"redhat-operators-7xln9\" (UID: \"d0b962da-0f8b-4b31-9065-09964bb3c242\") " pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.334871 4814 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0b962da-0f8b-4b31-9065-09964bb3c242-utilities\") pod \"redhat-operators-7xln9\" (UID: \"d0b962da-0f8b-4b31-9065-09964bb3c242\") " pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.360833 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6w4jt\" (UniqueName: \"kubernetes.io/projected/d0b962da-0f8b-4b31-9065-09964bb3c242-kube-api-access-6w4jt\") pod \"redhat-operators-7xln9\" (UID: \"d0b962da-0f8b-4b31-9065-09964bb3c242\") " pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.410064 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:11 crc kubenswrapper[4814]: I0122 05:53:11.907891 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7xln9"] Jan 22 05:53:12 crc kubenswrapper[4814]: I0122 05:53:12.052143 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xln9" event={"ID":"d0b962da-0f8b-4b31-9065-09964bb3c242","Type":"ContainerStarted","Data":"8835aa12d02c9d473011be0b46ae9b763b146c0283b2b6135570b583551debb5"} Jan 22 05:53:13 crc kubenswrapper[4814]: I0122 05:53:13.064413 4814 generic.go:334] "Generic (PLEG): container finished" podID="d0b962da-0f8b-4b31-9065-09964bb3c242" containerID="57b941fdf382b47458112487afc82557fcee87b457b15f056ba45e5c67552a14" exitCode=0 Jan 22 05:53:13 crc kubenswrapper[4814]: I0122 05:53:13.064458 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xln9" event={"ID":"d0b962da-0f8b-4b31-9065-09964bb3c242","Type":"ContainerDied","Data":"57b941fdf382b47458112487afc82557fcee87b457b15f056ba45e5c67552a14"} Jan 22 05:53:15 crc kubenswrapper[4814]: I0122 05:53:15.099099 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xln9" event={"ID":"d0b962da-0f8b-4b31-9065-09964bb3c242","Type":"ContainerStarted","Data":"7fe67667438497a99be7285272efc0dd6d0f5cdccf7bd69c106bef9b793ce3a8"} Jan 22 05:53:18 crc kubenswrapper[4814]: I0122 05:53:18.142227 4814 generic.go:334] "Generic (PLEG): container finished" podID="d0b962da-0f8b-4b31-9065-09964bb3c242" containerID="7fe67667438497a99be7285272efc0dd6d0f5cdccf7bd69c106bef9b793ce3a8" exitCode=0 Jan 22 05:53:18 crc kubenswrapper[4814]: I0122 05:53:18.142334 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xln9" event={"ID":"d0b962da-0f8b-4b31-9065-09964bb3c242","Type":"ContainerDied","Data":"7fe67667438497a99be7285272efc0dd6d0f5cdccf7bd69c106bef9b793ce3a8"} Jan 22 05:53:19 crc kubenswrapper[4814]: I0122 05:53:19.157872 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xln9" event={"ID":"d0b962da-0f8b-4b31-9065-09964bb3c242","Type":"ContainerStarted","Data":"caf07dd5bc978b595e84a0cd556e80809858c2b28fcd680068b21ab6e220941a"} Jan 22 05:53:19 crc kubenswrapper[4814]: I0122 05:53:19.183006 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7xln9" podStartSLOduration=2.672424854 podStartE2EDuration="8.182986291s" podCreationTimestamp="2026-01-22 05:53:11 +0000 UTC" firstStartedPulling="2026-01-22 05:53:13.06679716 +0000 UTC m=+2079.150285375" lastFinishedPulling="2026-01-22 
Jan 22 05:53:19 crc kubenswrapper[4814]: I0122 05:53:19.613826 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 05:53:19 crc kubenswrapper[4814]: I0122 05:53:19.613907 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 05:53:19 crc kubenswrapper[4814]: I0122 05:53:19.613956 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg"
Jan 22 05:53:19 crc kubenswrapper[4814]: I0122 05:53:19.614960 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"eba34c6458f41fc6c68b900130343d2fa0aa66d8953003d31615c0f8d2def5cd"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 05:53:19 crc kubenswrapper[4814]: I0122 05:53:19.615104 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://eba34c6458f41fc6c68b900130343d2fa0aa66d8953003d31615c0f8d2def5cd" gracePeriod=600
Jan 22 05:53:20 crc kubenswrapper[4814]: I0122 05:53:20.176300 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="eba34c6458f41fc6c68b900130343d2fa0aa66d8953003d31615c0f8d2def5cd" exitCode=0
Jan 22 05:53:20 crc kubenswrapper[4814]: I0122 05:53:20.176397 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"eba34c6458f41fc6c68b900130343d2fa0aa66d8953003d31615c0f8d2def5cd"}
Jan 22 05:53:20 crc kubenswrapper[4814]: I0122 05:53:20.176598 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0"}
Jan 22 05:53:20 crc kubenswrapper[4814]: I0122 05:53:20.176621 4814 scope.go:117] "RemoveContainer" containerID="60b2e956cdda9487023a3e5e655617c6b5dc55bded88622bada54bcafcadd683"
Jan 22 05:53:21 crc kubenswrapper[4814]: I0122 05:53:21.410379 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7xln9"
Jan 22 05:53:21 crc kubenswrapper[4814]: I0122 05:53:21.410606 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7xln9"
prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7xln9" podUID="d0b962da-0f8b-4b31-9065-09964bb3c242" containerName="registry-server" probeResult="failure" output=< Jan 22 05:53:22 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 05:53:22 crc kubenswrapper[4814]: > Jan 22 05:53:32 crc kubenswrapper[4814]: I0122 05:53:32.463661 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7xln9" podUID="d0b962da-0f8b-4b31-9065-09964bb3c242" containerName="registry-server" probeResult="failure" output=< Jan 22 05:53:32 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 05:53:32 crc kubenswrapper[4814]: > Jan 22 05:53:41 crc kubenswrapper[4814]: I0122 05:53:41.480045 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:41 crc kubenswrapper[4814]: I0122 05:53:41.558129 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:42 crc kubenswrapper[4814]: I0122 05:53:42.267949 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7xln9"] Jan 22 05:53:42 crc kubenswrapper[4814]: I0122 05:53:42.418065 4814 generic.go:334] "Generic (PLEG): container finished" podID="ef9f2307-3515-49f5-b666-bfcbd1536de1" containerID="5f0575e5c016bf9c15e8cf7562538a0a824b426026f3e55d72a31115e33ee794" exitCode=0 Jan 22 05:53:42 crc kubenswrapper[4814]: I0122 05:53:42.418306 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" event={"ID":"ef9f2307-3515-49f5-b666-bfcbd1536de1","Type":"ContainerDied","Data":"5f0575e5c016bf9c15e8cf7562538a0a824b426026f3e55d72a31115e33ee794"} Jan 22 05:53:43 crc kubenswrapper[4814]: I0122 05:53:43.426128 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7xln9" podUID="d0b962da-0f8b-4b31-9065-09964bb3c242" containerName="registry-server" containerID="cri-o://caf07dd5bc978b595e84a0cd556e80809858c2b28fcd680068b21ab6e220941a" gracePeriod=2 Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.125343 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.138217 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.138556 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0b962da-0f8b-4b31-9065-09964bb3c242-utilities\") pod \"d0b962da-0f8b-4b31-9065-09964bb3c242\" (UID: \"d0b962da-0f8b-4b31-9065-09964bb3c242\") " Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.138596 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6w4jt\" (UniqueName: \"kubernetes.io/projected/d0b962da-0f8b-4b31-9065-09964bb3c242-kube-api-access-6w4jt\") pod \"d0b962da-0f8b-4b31-9065-09964bb3c242\" (UID: \"d0b962da-0f8b-4b31-9065-09964bb3c242\") " Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.138640 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0b962da-0f8b-4b31-9065-09964bb3c242-catalog-content\") pod \"d0b962da-0f8b-4b31-9065-09964bb3c242\" (UID: \"d0b962da-0f8b-4b31-9065-09964bb3c242\") " Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.139277 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0b962da-0f8b-4b31-9065-09964bb3c242-utilities" (OuterVolumeSpecName: "utilities") pod "d0b962da-0f8b-4b31-9065-09964bb3c242" (UID: "d0b962da-0f8b-4b31-9065-09964bb3c242"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.164134 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0b962da-0f8b-4b31-9065-09964bb3c242-kube-api-access-6w4jt" (OuterVolumeSpecName: "kube-api-access-6w4jt") pod "d0b962da-0f8b-4b31-9065-09964bb3c242" (UID: "d0b962da-0f8b-4b31-9065-09964bb3c242"). InnerVolumeSpecName "kube-api-access-6w4jt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.248668 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqwd6\" (UniqueName: \"kubernetes.io/projected/ef9f2307-3515-49f5-b666-bfcbd1536de1-kube-api-access-nqwd6\") pod \"ef9f2307-3515-49f5-b666-bfcbd1536de1\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.249465 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-inventory\") pod \"ef9f2307-3515-49f5-b666-bfcbd1536de1\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.249548 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ef9f2307-3515-49f5-b666-bfcbd1536de1-ovncontroller-config-0\") pod \"ef9f2307-3515-49f5-b666-bfcbd1536de1\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.249566 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-ssh-key-openstack-edpm-ipam\") pod \"ef9f2307-3515-49f5-b666-bfcbd1536de1\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.249742 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-ovn-combined-ca-bundle\") pod \"ef9f2307-3515-49f5-b666-bfcbd1536de1\" (UID: \"ef9f2307-3515-49f5-b666-bfcbd1536de1\") " Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.250154 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0b962da-0f8b-4b31-9065-09964bb3c242-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.250166 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6w4jt\" (UniqueName: \"kubernetes.io/projected/d0b962da-0f8b-4b31-9065-09964bb3c242-kube-api-access-6w4jt\") on node \"crc\" DevicePath \"\"" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.256327 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "ef9f2307-3515-49f5-b666-bfcbd1536de1" (UID: "ef9f2307-3515-49f5-b666-bfcbd1536de1"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.256446 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef9f2307-3515-49f5-b666-bfcbd1536de1-kube-api-access-nqwd6" (OuterVolumeSpecName: "kube-api-access-nqwd6") pod "ef9f2307-3515-49f5-b666-bfcbd1536de1" (UID: "ef9f2307-3515-49f5-b666-bfcbd1536de1"). InnerVolumeSpecName "kube-api-access-nqwd6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.278128 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "ef9f2307-3515-49f5-b666-bfcbd1536de1" (UID: "ef9f2307-3515-49f5-b666-bfcbd1536de1"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.283307 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-inventory" (OuterVolumeSpecName: "inventory") pod "ef9f2307-3515-49f5-b666-bfcbd1536de1" (UID: "ef9f2307-3515-49f5-b666-bfcbd1536de1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.284340 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef9f2307-3515-49f5-b666-bfcbd1536de1-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "ef9f2307-3515-49f5-b666-bfcbd1536de1" (UID: "ef9f2307-3515-49f5-b666-bfcbd1536de1"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.324124 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0b962da-0f8b-4b31-9065-09964bb3c242-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d0b962da-0f8b-4b31-9065-09964bb3c242" (UID: "d0b962da-0f8b-4b31-9065-09964bb3c242"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.351903 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.351950 4814 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ef9f2307-3515-49f5-b666-bfcbd1536de1-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.351967 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.351981 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0b962da-0f8b-4b31-9065-09964bb3c242-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.351993 4814 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef9f2307-3515-49f5-b666-bfcbd1536de1-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.352005 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqwd6\" (UniqueName: \"kubernetes.io/projected/ef9f2307-3515-49f5-b666-bfcbd1536de1-kube-api-access-nqwd6\") on node \"crc\" DevicePath \"\"" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.434452 4814 generic.go:334] "Generic (PLEG): container finished" podID="d0b962da-0f8b-4b31-9065-09964bb3c242" containerID="caf07dd5bc978b595e84a0cd556e80809858c2b28fcd680068b21ab6e220941a" exitCode=0 Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.434511 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xln9" event={"ID":"d0b962da-0f8b-4b31-9065-09964bb3c242","Type":"ContainerDied","Data":"caf07dd5bc978b595e84a0cd556e80809858c2b28fcd680068b21ab6e220941a"} Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.434538 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xln9" event={"ID":"d0b962da-0f8b-4b31-9065-09964bb3c242","Type":"ContainerDied","Data":"8835aa12d02c9d473011be0b46ae9b763b146c0283b2b6135570b583551debb5"} Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.434553 4814 scope.go:117] "RemoveContainer" containerID="caf07dd5bc978b595e84a0cd556e80809858c2b28fcd680068b21ab6e220941a" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.434704 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7xln9" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.436901 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" event={"ID":"ef9f2307-3515-49f5-b666-bfcbd1536de1","Type":"ContainerDied","Data":"fa4bc33e82988e31006c7da50bc34140378e2b2359ca53e788fe2c60d3b8434b"} Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.436933 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa4bc33e82988e31006c7da50bc34140378e2b2359ca53e788fe2c60d3b8434b" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.436973 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-cz4dq" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.465785 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7xln9"] Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.466829 4814 scope.go:117] "RemoveContainer" containerID="7fe67667438497a99be7285272efc0dd6d0f5cdccf7bd69c106bef9b793ce3a8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.476921 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7xln9"] Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.491253 4814 scope.go:117] "RemoveContainer" containerID="57b941fdf382b47458112487afc82557fcee87b457b15f056ba45e5c67552a14" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.508340 4814 scope.go:117] "RemoveContainer" containerID="caf07dd5bc978b595e84a0cd556e80809858c2b28fcd680068b21ab6e220941a" Jan 22 05:53:44 crc kubenswrapper[4814]: E0122 05:53:44.508871 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"caf07dd5bc978b595e84a0cd556e80809858c2b28fcd680068b21ab6e220941a\": container with ID starting with caf07dd5bc978b595e84a0cd556e80809858c2b28fcd680068b21ab6e220941a not found: ID does not exist" containerID="caf07dd5bc978b595e84a0cd556e80809858c2b28fcd680068b21ab6e220941a" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.508913 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caf07dd5bc978b595e84a0cd556e80809858c2b28fcd680068b21ab6e220941a"} err="failed to get container status \"caf07dd5bc978b595e84a0cd556e80809858c2b28fcd680068b21ab6e220941a\": rpc error: code = NotFound desc = could not find container \"caf07dd5bc978b595e84a0cd556e80809858c2b28fcd680068b21ab6e220941a\": container with ID starting with caf07dd5bc978b595e84a0cd556e80809858c2b28fcd680068b21ab6e220941a not found: ID does not exist" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.508940 4814 scope.go:117] "RemoveContainer" containerID="7fe67667438497a99be7285272efc0dd6d0f5cdccf7bd69c106bef9b793ce3a8" Jan 22 05:53:44 crc kubenswrapper[4814]: E0122 05:53:44.509435 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7fe67667438497a99be7285272efc0dd6d0f5cdccf7bd69c106bef9b793ce3a8\": container with ID starting with 7fe67667438497a99be7285272efc0dd6d0f5cdccf7bd69c106bef9b793ce3a8 not found: ID does not exist" containerID="7fe67667438497a99be7285272efc0dd6d0f5cdccf7bd69c106bef9b793ce3a8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.509465 4814 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"7fe67667438497a99be7285272efc0dd6d0f5cdccf7bd69c106bef9b793ce3a8"} err="failed to get container status \"7fe67667438497a99be7285272efc0dd6d0f5cdccf7bd69c106bef9b793ce3a8\": rpc error: code = NotFound desc = could not find container \"7fe67667438497a99be7285272efc0dd6d0f5cdccf7bd69c106bef9b793ce3a8\": container with ID starting with 7fe67667438497a99be7285272efc0dd6d0f5cdccf7bd69c106bef9b793ce3a8 not found: ID does not exist" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.509488 4814 scope.go:117] "RemoveContainer" containerID="57b941fdf382b47458112487afc82557fcee87b457b15f056ba45e5c67552a14" Jan 22 05:53:44 crc kubenswrapper[4814]: E0122 05:53:44.509713 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57b941fdf382b47458112487afc82557fcee87b457b15f056ba45e5c67552a14\": container with ID starting with 57b941fdf382b47458112487afc82557fcee87b457b15f056ba45e5c67552a14 not found: ID does not exist" containerID="57b941fdf382b47458112487afc82557fcee87b457b15f056ba45e5c67552a14" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.509736 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57b941fdf382b47458112487afc82557fcee87b457b15f056ba45e5c67552a14"} err="failed to get container status \"57b941fdf382b47458112487afc82557fcee87b457b15f056ba45e5c67552a14\": rpc error: code = NotFound desc = could not find container \"57b941fdf382b47458112487afc82557fcee87b457b15f056ba45e5c67552a14\": container with ID starting with 57b941fdf382b47458112487afc82557fcee87b457b15f056ba45e5c67552a14 not found: ID does not exist" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.621792 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8"] Jan 22 05:53:44 crc kubenswrapper[4814]: E0122 05:53:44.622143 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0b962da-0f8b-4b31-9065-09964bb3c242" containerName="extract-utilities" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.622160 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0b962da-0f8b-4b31-9065-09964bb3c242" containerName="extract-utilities" Jan 22 05:53:44 crc kubenswrapper[4814]: E0122 05:53:44.622196 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef9f2307-3515-49f5-b666-bfcbd1536de1" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.622203 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef9f2307-3515-49f5-b666-bfcbd1536de1" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 22 05:53:44 crc kubenswrapper[4814]: E0122 05:53:44.622214 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0b962da-0f8b-4b31-9065-09964bb3c242" containerName="registry-server" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.622220 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0b962da-0f8b-4b31-9065-09964bb3c242" containerName="registry-server" Jan 22 05:53:44 crc kubenswrapper[4814]: E0122 05:53:44.622230 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0b962da-0f8b-4b31-9065-09964bb3c242" containerName="extract-content" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.622236 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0b962da-0f8b-4b31-9065-09964bb3c242" containerName="extract-content" Jan 22 05:53:44 crc 
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.622383 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0b962da-0f8b-4b31-9065-09964bb3c242" containerName="registry-server"
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.622411 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef9f2307-3515-49f5-b666-bfcbd1536de1" containerName="ovn-edpm-deployment-openstack-edpm-ipam"
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.622986 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8"
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.629258 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.629284 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8"
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.629467 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.631041 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config"
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.631088 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.631838 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config"
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.634031 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8"]
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.761290 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8"
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.761357 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8"
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.761398 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8"
Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.761415 4814 reconciler_common.go:245]
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgpzm\" (UniqueName: \"kubernetes.io/projected/bdd8e538-692f-4b12-8b5b-8141cc9b055c-kube-api-access-qgpzm\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.761462 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.761489 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.862688 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.862768 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.862806 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.862826 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgpzm\" (UniqueName: \"kubernetes.io/projected/bdd8e538-692f-4b12-8b5b-8141cc9b055c-kube-api-access-qgpzm\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.862877 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-inventory\") pod 
\"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.862906 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.866920 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.867238 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.867593 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.868460 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.871592 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.880809 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgpzm\" (UniqueName: \"kubernetes.io/projected/bdd8e538-692f-4b12-8b5b-8141cc9b055c-kube-api-access-qgpzm\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:44 crc kubenswrapper[4814]: I0122 05:53:44.941110 4814 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:53:45 crc kubenswrapper[4814]: I0122 05:53:45.496405 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8"] Jan 22 05:53:46 crc kubenswrapper[4814]: I0122 05:53:46.354110 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0b962da-0f8b-4b31-9065-09964bb3c242" path="/var/lib/kubelet/pods/d0b962da-0f8b-4b31-9065-09964bb3c242/volumes" Jan 22 05:53:46 crc kubenswrapper[4814]: I0122 05:53:46.456682 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" event={"ID":"bdd8e538-692f-4b12-8b5b-8141cc9b055c","Type":"ContainerStarted","Data":"7442a0f71e9ef94282dfe61aee21e10353855c1e17600d55668f510a143695a0"} Jan 22 05:53:46 crc kubenswrapper[4814]: I0122 05:53:46.456726 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" event={"ID":"bdd8e538-692f-4b12-8b5b-8141cc9b055c","Type":"ContainerStarted","Data":"ea50f096960bfaf2a6ebd52405d7ba5a5880c85e7d9f6a832c4d8ba52ceec97d"} Jan 22 05:53:46 crc kubenswrapper[4814]: I0122 05:53:46.478507 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" podStartSLOduration=2.049403359 podStartE2EDuration="2.478490461s" podCreationTimestamp="2026-01-22 05:53:44 +0000 UTC" firstStartedPulling="2026-01-22 05:53:45.489975531 +0000 UTC m=+2111.573463756" lastFinishedPulling="2026-01-22 05:53:45.919062633 +0000 UTC m=+2112.002550858" observedRunningTime="2026-01-22 05:53:46.470959317 +0000 UTC m=+2112.554447532" watchObservedRunningTime="2026-01-22 05:53:46.478490461 +0000 UTC m=+2112.561978676" Jan 22 05:54:44 crc kubenswrapper[4814]: I0122 05:54:44.022333 4814 generic.go:334] "Generic (PLEG): container finished" podID="bdd8e538-692f-4b12-8b5b-8141cc9b055c" containerID="7442a0f71e9ef94282dfe61aee21e10353855c1e17600d55668f510a143695a0" exitCode=0 Jan 22 05:54:44 crc kubenswrapper[4814]: I0122 05:54:44.023104 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" event={"ID":"bdd8e538-692f-4b12-8b5b-8141cc9b055c","Type":"ContainerDied","Data":"7442a0f71e9ef94282dfe61aee21e10353855c1e17600d55668f510a143695a0"} Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.608859 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.786206 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-neutron-ovn-metadata-agent-neutron-config-0\") pod \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.786284 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-neutron-metadata-combined-ca-bundle\") pod \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.786303 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-nova-metadata-neutron-config-0\") pod \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.786325 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-inventory\") pod \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.786351 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-ssh-key-openstack-edpm-ipam\") pod \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.786393 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qgpzm\" (UniqueName: \"kubernetes.io/projected/bdd8e538-692f-4b12-8b5b-8141cc9b055c-kube-api-access-qgpzm\") pod \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\" (UID: \"bdd8e538-692f-4b12-8b5b-8141cc9b055c\") " Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.791575 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "bdd8e538-692f-4b12-8b5b-8141cc9b055c" (UID: "bdd8e538-692f-4b12-8b5b-8141cc9b055c"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.793156 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdd8e538-692f-4b12-8b5b-8141cc9b055c-kube-api-access-qgpzm" (OuterVolumeSpecName: "kube-api-access-qgpzm") pod "bdd8e538-692f-4b12-8b5b-8141cc9b055c" (UID: "bdd8e538-692f-4b12-8b5b-8141cc9b055c"). InnerVolumeSpecName "kube-api-access-qgpzm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.826822 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "bdd8e538-692f-4b12-8b5b-8141cc9b055c" (UID: "bdd8e538-692f-4b12-8b5b-8141cc9b055c"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.826862 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "bdd8e538-692f-4b12-8b5b-8141cc9b055c" (UID: "bdd8e538-692f-4b12-8b5b-8141cc9b055c"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.826902 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "bdd8e538-692f-4b12-8b5b-8141cc9b055c" (UID: "bdd8e538-692f-4b12-8b5b-8141cc9b055c"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.826839 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-inventory" (OuterVolumeSpecName: "inventory") pod "bdd8e538-692f-4b12-8b5b-8141cc9b055c" (UID: "bdd8e538-692f-4b12-8b5b-8141cc9b055c"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.888905 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qgpzm\" (UniqueName: \"kubernetes.io/projected/bdd8e538-692f-4b12-8b5b-8141cc9b055c-kube-api-access-qgpzm\") on node \"crc\" DevicePath \"\"" Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.888950 4814 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.888968 4814 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.888982 4814 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.888995 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:54:45 crc kubenswrapper[4814]: I0122 05:54:45.889008 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdd8e538-692f-4b12-8b5b-8141cc9b055c-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.041547 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" event={"ID":"bdd8e538-692f-4b12-8b5b-8141cc9b055c","Type":"ContainerDied","Data":"ea50f096960bfaf2a6ebd52405d7ba5a5880c85e7d9f6a832c4d8ba52ceec97d"} Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.041593 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea50f096960bfaf2a6ebd52405d7ba5a5880c85e7d9f6a832c4d8ba52ceec97d" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.041619 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-zsjl8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.187115 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8"] Jan 22 05:54:46 crc kubenswrapper[4814]: E0122 05:54:46.187617 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdd8e538-692f-4b12-8b5b-8141cc9b055c" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.187654 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdd8e538-692f-4b12-8b5b-8141cc9b055c" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.187881 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdd8e538-692f-4b12-8b5b-8141cc9b055c" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.188604 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.192751 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.192775 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.192974 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.193021 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.202208 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.204273 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8"] Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.296129 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.296196 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.296236 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: 
\"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.296315 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.296439 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rx79g\" (UniqueName: \"kubernetes.io/projected/ae3fef63-f370-4cba-bb63-be8a09063383-kube-api-access-rx79g\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.397649 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rx79g\" (UniqueName: \"kubernetes.io/projected/ae3fef63-f370-4cba-bb63-be8a09063383-kube-api-access-rx79g\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.397724 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.397770 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.397809 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.397878 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.402524 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: 
\"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.403718 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.405375 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.406448 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.417032 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rx79g\" (UniqueName: \"kubernetes.io/projected/ae3fef63-f370-4cba-bb63-be8a09063383-kube-api-access-rx79g\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:46 crc kubenswrapper[4814]: I0122 05:54:46.508736 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:54:47 crc kubenswrapper[4814]: I0122 05:54:47.118054 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8"] Jan 22 05:54:48 crc kubenswrapper[4814]: I0122 05:54:48.060360 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" event={"ID":"ae3fef63-f370-4cba-bb63-be8a09063383","Type":"ContainerStarted","Data":"72b0d0c8bc7f5e8a5b423436bca291fb0d4a1ba88058abbb779539221b6a69df"} Jan 22 05:54:49 crc kubenswrapper[4814]: I0122 05:54:49.072746 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" event={"ID":"ae3fef63-f370-4cba-bb63-be8a09063383","Type":"ContainerStarted","Data":"1db9faf6cfa5efc5af87a41b6083b9264ba3bee82f2713d16c863a412680d7fd"} Jan 22 05:54:49 crc kubenswrapper[4814]: I0122 05:54:49.112531 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" podStartSLOduration=2.370402032 podStartE2EDuration="3.112505695s" podCreationTimestamp="2026-01-22 05:54:46 +0000 UTC" firstStartedPulling="2026-01-22 05:54:47.120736504 +0000 UTC m=+2173.204225009" lastFinishedPulling="2026-01-22 05:54:47.862840447 +0000 UTC m=+2173.946328672" observedRunningTime="2026-01-22 05:54:49.10240696 +0000 UTC m=+2175.185895195" watchObservedRunningTime="2026-01-22 05:54:49.112505695 +0000 UTC m=+2175.195993920" Jan 22 05:54:50 crc kubenswrapper[4814]: I0122 05:54:50.437744 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6h4ch"] Jan 22 05:54:50 crc kubenswrapper[4814]: I0122 05:54:50.439659 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:54:50 crc kubenswrapper[4814]: I0122 05:54:50.467911 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6h4ch"] Jan 22 05:54:50 crc kubenswrapper[4814]: I0122 05:54:50.582568 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjmcs\" (UniqueName: \"kubernetes.io/projected/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-kube-api-access-gjmcs\") pod \"certified-operators-6h4ch\" (UID: \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\") " pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:54:50 crc kubenswrapper[4814]: I0122 05:54:50.582659 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-catalog-content\") pod \"certified-operators-6h4ch\" (UID: \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\") " pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:54:50 crc kubenswrapper[4814]: I0122 05:54:50.582731 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-utilities\") pod \"certified-operators-6h4ch\" (UID: \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\") " pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:54:50 crc kubenswrapper[4814]: I0122 05:54:50.684540 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjmcs\" (UniqueName: \"kubernetes.io/projected/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-kube-api-access-gjmcs\") pod \"certified-operators-6h4ch\" (UID: \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\") " pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:54:50 crc kubenswrapper[4814]: I0122 05:54:50.684597 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-catalog-content\") pod \"certified-operators-6h4ch\" (UID: \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\") " pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:54:50 crc kubenswrapper[4814]: I0122 05:54:50.684680 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-utilities\") pod \"certified-operators-6h4ch\" (UID: \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\") " pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:54:50 crc kubenswrapper[4814]: I0122 05:54:50.685117 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-utilities\") pod \"certified-operators-6h4ch\" (UID: \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\") " pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:54:50 crc kubenswrapper[4814]: I0122 05:54:50.685585 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-catalog-content\") pod \"certified-operators-6h4ch\" (UID: \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\") " pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:54:50 crc kubenswrapper[4814]: I0122 05:54:50.711940 4814 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gjmcs\" (UniqueName: \"kubernetes.io/projected/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-kube-api-access-gjmcs\") pod \"certified-operators-6h4ch\" (UID: \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\") " pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:54:50 crc kubenswrapper[4814]: I0122 05:54:50.761370 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:54:51 crc kubenswrapper[4814]: I0122 05:54:51.356093 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6h4ch"] Jan 22 05:54:51 crc kubenswrapper[4814]: I0122 05:54:51.834504 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vdbhf"] Jan 22 05:54:51 crc kubenswrapper[4814]: I0122 05:54:51.837031 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:54:51 crc kubenswrapper[4814]: I0122 05:54:51.867585 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vdbhf"] Jan 22 05:54:51 crc kubenswrapper[4814]: I0122 05:54:51.908497 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5d4cea0-8e5a-48ed-8967-a61a7559552d-utilities\") pod \"community-operators-vdbhf\" (UID: \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\") " pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:54:51 crc kubenswrapper[4814]: I0122 05:54:51.908534 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5d4cea0-8e5a-48ed-8967-a61a7559552d-catalog-content\") pod \"community-operators-vdbhf\" (UID: \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\") " pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:54:51 crc kubenswrapper[4814]: I0122 05:54:51.908660 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsp49\" (UniqueName: \"kubernetes.io/projected/d5d4cea0-8e5a-48ed-8967-a61a7559552d-kube-api-access-qsp49\") pod \"community-operators-vdbhf\" (UID: \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\") " pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:54:52 crc kubenswrapper[4814]: I0122 05:54:52.010463 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5d4cea0-8e5a-48ed-8967-a61a7559552d-utilities\") pod \"community-operators-vdbhf\" (UID: \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\") " pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:54:52 crc kubenswrapper[4814]: I0122 05:54:52.010520 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5d4cea0-8e5a-48ed-8967-a61a7559552d-catalog-content\") pod \"community-operators-vdbhf\" (UID: \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\") " pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:54:52 crc kubenswrapper[4814]: I0122 05:54:52.010657 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsp49\" (UniqueName: \"kubernetes.io/projected/d5d4cea0-8e5a-48ed-8967-a61a7559552d-kube-api-access-qsp49\") pod \"community-operators-vdbhf\" (UID: 
\"d5d4cea0-8e5a-48ed-8967-a61a7559552d\") " pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:54:52 crc kubenswrapper[4814]: I0122 05:54:52.011216 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5d4cea0-8e5a-48ed-8967-a61a7559552d-utilities\") pod \"community-operators-vdbhf\" (UID: \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\") " pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:54:52 crc kubenswrapper[4814]: I0122 05:54:52.011231 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5d4cea0-8e5a-48ed-8967-a61a7559552d-catalog-content\") pod \"community-operators-vdbhf\" (UID: \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\") " pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:54:52 crc kubenswrapper[4814]: I0122 05:54:52.033771 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsp49\" (UniqueName: \"kubernetes.io/projected/d5d4cea0-8e5a-48ed-8967-a61a7559552d-kube-api-access-qsp49\") pod \"community-operators-vdbhf\" (UID: \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\") " pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:54:52 crc kubenswrapper[4814]: I0122 05:54:52.097095 4814 generic.go:334] "Generic (PLEG): container finished" podID="0a0a4763-f5ac-45c5-9948-c365ac5a4e15" containerID="5506c2fe761e1899a1a6786ba018be0e347f22c1a43b81d749c075ba572d4496" exitCode=0 Jan 22 05:54:52 crc kubenswrapper[4814]: I0122 05:54:52.097137 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6h4ch" event={"ID":"0a0a4763-f5ac-45c5-9948-c365ac5a4e15","Type":"ContainerDied","Data":"5506c2fe761e1899a1a6786ba018be0e347f22c1a43b81d749c075ba572d4496"} Jan 22 05:54:52 crc kubenswrapper[4814]: I0122 05:54:52.097161 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6h4ch" event={"ID":"0a0a4763-f5ac-45c5-9948-c365ac5a4e15","Type":"ContainerStarted","Data":"71d416b8d6f9092c0daba6ffd22af2773c6d45554e54414272bcaf1518e29d33"} Jan 22 05:54:52 crc kubenswrapper[4814]: I0122 05:54:52.099505 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 05:54:52 crc kubenswrapper[4814]: I0122 05:54:52.155883 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:54:52 crc kubenswrapper[4814]: I0122 05:54:52.738207 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vdbhf"] Jan 22 05:54:52 crc kubenswrapper[4814]: W0122 05:54:52.743035 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd5d4cea0_8e5a_48ed_8967_a61a7559552d.slice/crio-7f84dd17c68374702c4173e5298c0c9664d684bb9389ad0390c4aca5021e1d47 WatchSource:0}: Error finding container 7f84dd17c68374702c4173e5298c0c9664d684bb9389ad0390c4aca5021e1d47: Status 404 returned error can't find the container with id 7f84dd17c68374702c4173e5298c0c9664d684bb9389ad0390c4aca5021e1d47 Jan 22 05:54:53 crc kubenswrapper[4814]: I0122 05:54:53.107458 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6h4ch" event={"ID":"0a0a4763-f5ac-45c5-9948-c365ac5a4e15","Type":"ContainerStarted","Data":"dd42e24ee86a790f71cf62619543fcff3c0bb2e70e15a593277cd5a01004571d"} Jan 22 05:54:53 crc kubenswrapper[4814]: I0122 05:54:53.109484 4814 generic.go:334] "Generic (PLEG): container finished" podID="d5d4cea0-8e5a-48ed-8967-a61a7559552d" containerID="d110e365841ea7215e59f437e104c51c85188d616f3b9d86204d7c8bf7d3f440" exitCode=0 Jan 22 05:54:53 crc kubenswrapper[4814]: I0122 05:54:53.109583 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdbhf" event={"ID":"d5d4cea0-8e5a-48ed-8967-a61a7559552d","Type":"ContainerDied","Data":"d110e365841ea7215e59f437e104c51c85188d616f3b9d86204d7c8bf7d3f440"} Jan 22 05:54:53 crc kubenswrapper[4814]: I0122 05:54:53.109678 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdbhf" event={"ID":"d5d4cea0-8e5a-48ed-8967-a61a7559552d","Type":"ContainerStarted","Data":"7f84dd17c68374702c4173e5298c0c9664d684bb9389ad0390c4aca5021e1d47"} Jan 22 05:54:54 crc kubenswrapper[4814]: I0122 05:54:54.119448 4814 generic.go:334] "Generic (PLEG): container finished" podID="0a0a4763-f5ac-45c5-9948-c365ac5a4e15" containerID="dd42e24ee86a790f71cf62619543fcff3c0bb2e70e15a593277cd5a01004571d" exitCode=0 Jan 22 05:54:54 crc kubenswrapper[4814]: I0122 05:54:54.119677 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6h4ch" event={"ID":"0a0a4763-f5ac-45c5-9948-c365ac5a4e15","Type":"ContainerDied","Data":"dd42e24ee86a790f71cf62619543fcff3c0bb2e70e15a593277cd5a01004571d"} Jan 22 05:54:54 crc kubenswrapper[4814]: I0122 05:54:54.122476 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdbhf" event={"ID":"d5d4cea0-8e5a-48ed-8967-a61a7559552d","Type":"ContainerStarted","Data":"b0faeed0c4443495da74bdd18d84017034b2cce7a2ac4fc7ec56ab1b26d35042"} Jan 22 05:54:56 crc kubenswrapper[4814]: I0122 05:54:56.141434 4814 generic.go:334] "Generic (PLEG): container finished" podID="d5d4cea0-8e5a-48ed-8967-a61a7559552d" containerID="b0faeed0c4443495da74bdd18d84017034b2cce7a2ac4fc7ec56ab1b26d35042" exitCode=0 Jan 22 05:54:56 crc kubenswrapper[4814]: I0122 05:54:56.141764 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdbhf" event={"ID":"d5d4cea0-8e5a-48ed-8967-a61a7559552d","Type":"ContainerDied","Data":"b0faeed0c4443495da74bdd18d84017034b2cce7a2ac4fc7ec56ab1b26d35042"} Jan 22 05:54:57 crc kubenswrapper[4814]: I0122 
05:54:57.156876 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6h4ch" event={"ID":"0a0a4763-f5ac-45c5-9948-c365ac5a4e15","Type":"ContainerStarted","Data":"be9fb3b2509a7c3c4670f5cdcd503c9528c34b9257563c3bf7abf40562fc38bd"} Jan 22 05:54:58 crc kubenswrapper[4814]: I0122 05:54:58.174919 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdbhf" event={"ID":"d5d4cea0-8e5a-48ed-8967-a61a7559552d","Type":"ContainerStarted","Data":"08f79d8925e68de87a4b263257fb231204451fc1b7103da978300f119273db35"} Jan 22 05:54:58 crc kubenswrapper[4814]: I0122 05:54:58.192525 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6h4ch" podStartSLOduration=3.494544945 podStartE2EDuration="8.192503347s" podCreationTimestamp="2026-01-22 05:54:50 +0000 UTC" firstStartedPulling="2026-01-22 05:54:52.099298988 +0000 UTC m=+2178.182787203" lastFinishedPulling="2026-01-22 05:54:56.79725738 +0000 UTC m=+2182.880745605" observedRunningTime="2026-01-22 05:54:57.18224128 +0000 UTC m=+2183.265729495" watchObservedRunningTime="2026-01-22 05:54:58.192503347 +0000 UTC m=+2184.275991572" Jan 22 05:54:58 crc kubenswrapper[4814]: I0122 05:54:58.202690 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vdbhf" podStartSLOduration=3.343010028 podStartE2EDuration="7.202659684s" podCreationTimestamp="2026-01-22 05:54:51 +0000 UTC" firstStartedPulling="2026-01-22 05:54:53.111063442 +0000 UTC m=+2179.194551657" lastFinishedPulling="2026-01-22 05:54:56.970713088 +0000 UTC m=+2183.054201313" observedRunningTime="2026-01-22 05:54:58.19225227 +0000 UTC m=+2184.275740485" watchObservedRunningTime="2026-01-22 05:54:58.202659684 +0000 UTC m=+2184.286147919" Jan 22 05:55:00 crc kubenswrapper[4814]: I0122 05:55:00.763319 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:55:00 crc kubenswrapper[4814]: I0122 05:55:00.763852 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:55:00 crc kubenswrapper[4814]: I0122 05:55:00.815098 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:55:02 crc kubenswrapper[4814]: I0122 05:55:02.157129 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:55:02 crc kubenswrapper[4814]: I0122 05:55:02.157519 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:55:02 crc kubenswrapper[4814]: I0122 05:55:02.242985 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:55:02 crc kubenswrapper[4814]: I0122 05:55:02.310968 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:55:03 crc kubenswrapper[4814]: I0122 05:55:03.430039 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vdbhf"] Jan 22 05:55:04 crc kubenswrapper[4814]: I0122 05:55:04.238972 4814 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/community-operators-vdbhf" podUID="d5d4cea0-8e5a-48ed-8967-a61a7559552d" containerName="registry-server" containerID="cri-o://08f79d8925e68de87a4b263257fb231204451fc1b7103da978300f119273db35" gracePeriod=2 Jan 22 05:55:04 crc kubenswrapper[4814]: I0122 05:55:04.799611 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:55:04 crc kubenswrapper[4814]: I0122 05:55:04.862025 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5d4cea0-8e5a-48ed-8967-a61a7559552d-catalog-content\") pod \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\" (UID: \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\") " Jan 22 05:55:04 crc kubenswrapper[4814]: I0122 05:55:04.862097 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsp49\" (UniqueName: \"kubernetes.io/projected/d5d4cea0-8e5a-48ed-8967-a61a7559552d-kube-api-access-qsp49\") pod \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\" (UID: \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\") " Jan 22 05:55:04 crc kubenswrapper[4814]: I0122 05:55:04.862210 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5d4cea0-8e5a-48ed-8967-a61a7559552d-utilities\") pod \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\" (UID: \"d5d4cea0-8e5a-48ed-8967-a61a7559552d\") " Jan 22 05:55:04 crc kubenswrapper[4814]: I0122 05:55:04.863065 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5d4cea0-8e5a-48ed-8967-a61a7559552d-utilities" (OuterVolumeSpecName: "utilities") pod "d5d4cea0-8e5a-48ed-8967-a61a7559552d" (UID: "d5d4cea0-8e5a-48ed-8967-a61a7559552d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:55:04 crc kubenswrapper[4814]: I0122 05:55:04.868905 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5d4cea0-8e5a-48ed-8967-a61a7559552d-kube-api-access-qsp49" (OuterVolumeSpecName: "kube-api-access-qsp49") pod "d5d4cea0-8e5a-48ed-8967-a61a7559552d" (UID: "d5d4cea0-8e5a-48ed-8967-a61a7559552d"). InnerVolumeSpecName "kube-api-access-qsp49". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:55:04 crc kubenswrapper[4814]: I0122 05:55:04.927373 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5d4cea0-8e5a-48ed-8967-a61a7559552d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d5d4cea0-8e5a-48ed-8967-a61a7559552d" (UID: "d5d4cea0-8e5a-48ed-8967-a61a7559552d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:55:04 crc kubenswrapper[4814]: I0122 05:55:04.964812 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5d4cea0-8e5a-48ed-8967-a61a7559552d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:55:04 crc kubenswrapper[4814]: I0122 05:55:04.964840 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsp49\" (UniqueName: \"kubernetes.io/projected/d5d4cea0-8e5a-48ed-8967-a61a7559552d-kube-api-access-qsp49\") on node \"crc\" DevicePath \"\"" Jan 22 05:55:04 crc kubenswrapper[4814]: I0122 05:55:04.964853 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5d4cea0-8e5a-48ed-8967-a61a7559552d-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.250894 4814 generic.go:334] "Generic (PLEG): container finished" podID="d5d4cea0-8e5a-48ed-8967-a61a7559552d" containerID="08f79d8925e68de87a4b263257fb231204451fc1b7103da978300f119273db35" exitCode=0 Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.250941 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdbhf" event={"ID":"d5d4cea0-8e5a-48ed-8967-a61a7559552d","Type":"ContainerDied","Data":"08f79d8925e68de87a4b263257fb231204451fc1b7103da978300f119273db35"} Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.250994 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vdbhf" event={"ID":"d5d4cea0-8e5a-48ed-8967-a61a7559552d","Type":"ContainerDied","Data":"7f84dd17c68374702c4173e5298c0c9664d684bb9389ad0390c4aca5021e1d47"} Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.251017 4814 scope.go:117] "RemoveContainer" containerID="08f79d8925e68de87a4b263257fb231204451fc1b7103da978300f119273db35" Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.251568 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vdbhf" Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.277436 4814 scope.go:117] "RemoveContainer" containerID="b0faeed0c4443495da74bdd18d84017034b2cce7a2ac4fc7ec56ab1b26d35042" Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.301380 4814 scope.go:117] "RemoveContainer" containerID="d110e365841ea7215e59f437e104c51c85188d616f3b9d86204d7c8bf7d3f440" Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.312960 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vdbhf"] Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.325311 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vdbhf"] Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.347854 4814 scope.go:117] "RemoveContainer" containerID="08f79d8925e68de87a4b263257fb231204451fc1b7103da978300f119273db35" Jan 22 05:55:05 crc kubenswrapper[4814]: E0122 05:55:05.348236 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08f79d8925e68de87a4b263257fb231204451fc1b7103da978300f119273db35\": container with ID starting with 08f79d8925e68de87a4b263257fb231204451fc1b7103da978300f119273db35 not found: ID does not exist" containerID="08f79d8925e68de87a4b263257fb231204451fc1b7103da978300f119273db35" Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.348271 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08f79d8925e68de87a4b263257fb231204451fc1b7103da978300f119273db35"} err="failed to get container status \"08f79d8925e68de87a4b263257fb231204451fc1b7103da978300f119273db35\": rpc error: code = NotFound desc = could not find container \"08f79d8925e68de87a4b263257fb231204451fc1b7103da978300f119273db35\": container with ID starting with 08f79d8925e68de87a4b263257fb231204451fc1b7103da978300f119273db35 not found: ID does not exist" Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.348292 4814 scope.go:117] "RemoveContainer" containerID="b0faeed0c4443495da74bdd18d84017034b2cce7a2ac4fc7ec56ab1b26d35042" Jan 22 05:55:05 crc kubenswrapper[4814]: E0122 05:55:05.348515 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0faeed0c4443495da74bdd18d84017034b2cce7a2ac4fc7ec56ab1b26d35042\": container with ID starting with b0faeed0c4443495da74bdd18d84017034b2cce7a2ac4fc7ec56ab1b26d35042 not found: ID does not exist" containerID="b0faeed0c4443495da74bdd18d84017034b2cce7a2ac4fc7ec56ab1b26d35042" Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.348640 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0faeed0c4443495da74bdd18d84017034b2cce7a2ac4fc7ec56ab1b26d35042"} err="failed to get container status \"b0faeed0c4443495da74bdd18d84017034b2cce7a2ac4fc7ec56ab1b26d35042\": rpc error: code = NotFound desc = could not find container \"b0faeed0c4443495da74bdd18d84017034b2cce7a2ac4fc7ec56ab1b26d35042\": container with ID starting with b0faeed0c4443495da74bdd18d84017034b2cce7a2ac4fc7ec56ab1b26d35042 not found: ID does not exist" Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.348734 4814 scope.go:117] "RemoveContainer" containerID="d110e365841ea7215e59f437e104c51c85188d616f3b9d86204d7c8bf7d3f440" Jan 22 05:55:05 crc kubenswrapper[4814]: E0122 05:55:05.349129 4814 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d110e365841ea7215e59f437e104c51c85188d616f3b9d86204d7c8bf7d3f440\": container with ID starting with d110e365841ea7215e59f437e104c51c85188d616f3b9d86204d7c8bf7d3f440 not found: ID does not exist" containerID="d110e365841ea7215e59f437e104c51c85188d616f3b9d86204d7c8bf7d3f440" Jan 22 05:55:05 crc kubenswrapper[4814]: I0122 05:55:05.349155 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d110e365841ea7215e59f437e104c51c85188d616f3b9d86204d7c8bf7d3f440"} err="failed to get container status \"d110e365841ea7215e59f437e104c51c85188d616f3b9d86204d7c8bf7d3f440\": rpc error: code = NotFound desc = could not find container \"d110e365841ea7215e59f437e104c51c85188d616f3b9d86204d7c8bf7d3f440\": container with ID starting with d110e365841ea7215e59f437e104c51c85188d616f3b9d86204d7c8bf7d3f440 not found: ID does not exist" Jan 22 05:55:06 crc kubenswrapper[4814]: I0122 05:55:06.361799 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5d4cea0-8e5a-48ed-8967-a61a7559552d" path="/var/lib/kubelet/pods/d5d4cea0-8e5a-48ed-8967-a61a7559552d/volumes" Jan 22 05:55:10 crc kubenswrapper[4814]: I0122 05:55:10.814462 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:55:10 crc kubenswrapper[4814]: I0122 05:55:10.873288 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6h4ch"] Jan 22 05:55:11 crc kubenswrapper[4814]: I0122 05:55:11.303610 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6h4ch" podUID="0a0a4763-f5ac-45c5-9948-c365ac5a4e15" containerName="registry-server" containerID="cri-o://be9fb3b2509a7c3c4670f5cdcd503c9528c34b9257563c3bf7abf40562fc38bd" gracePeriod=2 Jan 22 05:55:11 crc kubenswrapper[4814]: I0122 05:55:11.812682 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:55:11 crc kubenswrapper[4814]: I0122 05:55:11.909186 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-utilities\") pod \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\" (UID: \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\") " Jan 22 05:55:11 crc kubenswrapper[4814]: I0122 05:55:11.909917 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-utilities" (OuterVolumeSpecName: "utilities") pod "0a0a4763-f5ac-45c5-9948-c365ac5a4e15" (UID: "0a0a4763-f5ac-45c5-9948-c365ac5a4e15"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:55:11 crc kubenswrapper[4814]: I0122 05:55:11.910108 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjmcs\" (UniqueName: \"kubernetes.io/projected/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-kube-api-access-gjmcs\") pod \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\" (UID: \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\") " Jan 22 05:55:11 crc kubenswrapper[4814]: I0122 05:55:11.911034 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-catalog-content\") pod \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\" (UID: \"0a0a4763-f5ac-45c5-9948-c365ac5a4e15\") " Jan 22 05:55:11 crc kubenswrapper[4814]: I0122 05:55:11.911791 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 05:55:11 crc kubenswrapper[4814]: I0122 05:55:11.918598 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-kube-api-access-gjmcs" (OuterVolumeSpecName: "kube-api-access-gjmcs") pod "0a0a4763-f5ac-45c5-9948-c365ac5a4e15" (UID: "0a0a4763-f5ac-45c5-9948-c365ac5a4e15"). InnerVolumeSpecName "kube-api-access-gjmcs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:55:11 crc kubenswrapper[4814]: I0122 05:55:11.956345 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0a0a4763-f5ac-45c5-9948-c365ac5a4e15" (UID: "0a0a4763-f5ac-45c5-9948-c365ac5a4e15"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.013957 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjmcs\" (UniqueName: \"kubernetes.io/projected/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-kube-api-access-gjmcs\") on node \"crc\" DevicePath \"\"" Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.013999 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a0a4763-f5ac-45c5-9948-c365ac5a4e15-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.315394 4814 generic.go:334] "Generic (PLEG): container finished" podID="0a0a4763-f5ac-45c5-9948-c365ac5a4e15" containerID="be9fb3b2509a7c3c4670f5cdcd503c9528c34b9257563c3bf7abf40562fc38bd" exitCode=0 Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.315442 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6h4ch" event={"ID":"0a0a4763-f5ac-45c5-9948-c365ac5a4e15","Type":"ContainerDied","Data":"be9fb3b2509a7c3c4670f5cdcd503c9528c34b9257563c3bf7abf40562fc38bd"} Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.315449 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6h4ch" Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.315469 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6h4ch" event={"ID":"0a0a4763-f5ac-45c5-9948-c365ac5a4e15","Type":"ContainerDied","Data":"71d416b8d6f9092c0daba6ffd22af2773c6d45554e54414272bcaf1518e29d33"} Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.315488 4814 scope.go:117] "RemoveContainer" containerID="be9fb3b2509a7c3c4670f5cdcd503c9528c34b9257563c3bf7abf40562fc38bd" Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.354285 4814 scope.go:117] "RemoveContainer" containerID="dd42e24ee86a790f71cf62619543fcff3c0bb2e70e15a593277cd5a01004571d" Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.401399 4814 scope.go:117] "RemoveContainer" containerID="5506c2fe761e1899a1a6786ba018be0e347f22c1a43b81d749c075ba572d4496" Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.420160 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6h4ch"] Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.430641 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6h4ch"] Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.450522 4814 scope.go:117] "RemoveContainer" containerID="be9fb3b2509a7c3c4670f5cdcd503c9528c34b9257563c3bf7abf40562fc38bd" Jan 22 05:55:12 crc kubenswrapper[4814]: E0122 05:55:12.450984 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be9fb3b2509a7c3c4670f5cdcd503c9528c34b9257563c3bf7abf40562fc38bd\": container with ID starting with be9fb3b2509a7c3c4670f5cdcd503c9528c34b9257563c3bf7abf40562fc38bd not found: ID does not exist" containerID="be9fb3b2509a7c3c4670f5cdcd503c9528c34b9257563c3bf7abf40562fc38bd" Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.451026 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be9fb3b2509a7c3c4670f5cdcd503c9528c34b9257563c3bf7abf40562fc38bd"} err="failed to get container status \"be9fb3b2509a7c3c4670f5cdcd503c9528c34b9257563c3bf7abf40562fc38bd\": rpc error: code = NotFound desc = could not find container \"be9fb3b2509a7c3c4670f5cdcd503c9528c34b9257563c3bf7abf40562fc38bd\": container with ID starting with be9fb3b2509a7c3c4670f5cdcd503c9528c34b9257563c3bf7abf40562fc38bd not found: ID does not exist" Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.451052 4814 scope.go:117] "RemoveContainer" containerID="dd42e24ee86a790f71cf62619543fcff3c0bb2e70e15a593277cd5a01004571d" Jan 22 05:55:12 crc kubenswrapper[4814]: E0122 05:55:12.451291 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd42e24ee86a790f71cf62619543fcff3c0bb2e70e15a593277cd5a01004571d\": container with ID starting with dd42e24ee86a790f71cf62619543fcff3c0bb2e70e15a593277cd5a01004571d not found: ID does not exist" containerID="dd42e24ee86a790f71cf62619543fcff3c0bb2e70e15a593277cd5a01004571d" Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.451398 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd42e24ee86a790f71cf62619543fcff3c0bb2e70e15a593277cd5a01004571d"} err="failed to get container status \"dd42e24ee86a790f71cf62619543fcff3c0bb2e70e15a593277cd5a01004571d\": rpc error: code = NotFound desc = could not find 
container \"dd42e24ee86a790f71cf62619543fcff3c0bb2e70e15a593277cd5a01004571d\": container with ID starting with dd42e24ee86a790f71cf62619543fcff3c0bb2e70e15a593277cd5a01004571d not found: ID does not exist" Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.451492 4814 scope.go:117] "RemoveContainer" containerID="5506c2fe761e1899a1a6786ba018be0e347f22c1a43b81d749c075ba572d4496" Jan 22 05:55:12 crc kubenswrapper[4814]: E0122 05:55:12.451889 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5506c2fe761e1899a1a6786ba018be0e347f22c1a43b81d749c075ba572d4496\": container with ID starting with 5506c2fe761e1899a1a6786ba018be0e347f22c1a43b81d749c075ba572d4496 not found: ID does not exist" containerID="5506c2fe761e1899a1a6786ba018be0e347f22c1a43b81d749c075ba572d4496" Jan 22 05:55:12 crc kubenswrapper[4814]: I0122 05:55:12.452022 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5506c2fe761e1899a1a6786ba018be0e347f22c1a43b81d749c075ba572d4496"} err="failed to get container status \"5506c2fe761e1899a1a6786ba018be0e347f22c1a43b81d749c075ba572d4496\": rpc error: code = NotFound desc = could not find container \"5506c2fe761e1899a1a6786ba018be0e347f22c1a43b81d749c075ba572d4496\": container with ID starting with 5506c2fe761e1899a1a6786ba018be0e347f22c1a43b81d749c075ba572d4496 not found: ID does not exist" Jan 22 05:55:14 crc kubenswrapper[4814]: I0122 05:55:14.363674 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a0a4763-f5ac-45c5-9948-c365ac5a4e15" path="/var/lib/kubelet/pods/0a0a4763-f5ac-45c5-9948-c365ac5a4e15/volumes" Jan 22 05:55:19 crc kubenswrapper[4814]: I0122 05:55:19.614003 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:55:19 crc kubenswrapper[4814]: I0122 05:55:19.614501 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:55:49 crc kubenswrapper[4814]: I0122 05:55:49.614115 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 05:55:49 crc kubenswrapper[4814]: I0122 05:55:49.614654 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:56:19 crc kubenswrapper[4814]: I0122 05:56:19.614456 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 
05:56:19 crc kubenswrapper[4814]: I0122 05:56:19.615183 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 05:56:19 crc kubenswrapper[4814]: I0122 05:56:19.615248 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 05:56:19 crc kubenswrapper[4814]: I0122 05:56:19.616258 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 05:56:19 crc kubenswrapper[4814]: I0122 05:56:19.616366 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" gracePeriod=600 Jan 22 05:56:19 crc kubenswrapper[4814]: E0122 05:56:19.750268 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:56:20 crc kubenswrapper[4814]: I0122 05:56:20.026296 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" exitCode=0 Jan 22 05:56:20 crc kubenswrapper[4814]: I0122 05:56:20.026351 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0"} Jan 22 05:56:20 crc kubenswrapper[4814]: I0122 05:56:20.026395 4814 scope.go:117] "RemoveContainer" containerID="eba34c6458f41fc6c68b900130343d2fa0aa66d8953003d31615c0f8d2def5cd" Jan 22 05:56:20 crc kubenswrapper[4814]: I0122 05:56:20.026808 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:56:20 crc kubenswrapper[4814]: E0122 05:56:20.027127 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:56:32 crc kubenswrapper[4814]: I0122 05:56:32.345556 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:56:32 crc 
kubenswrapper[4814]: E0122 05:56:32.346450 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:56:44 crc kubenswrapper[4814]: I0122 05:56:44.344479 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:56:44 crc kubenswrapper[4814]: E0122 05:56:44.345456 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:56:56 crc kubenswrapper[4814]: I0122 05:56:56.343930 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:56:56 crc kubenswrapper[4814]: E0122 05:56:56.344812 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:57:10 crc kubenswrapper[4814]: I0122 05:57:10.344243 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:57:10 crc kubenswrapper[4814]: E0122 05:57:10.344895 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:57:21 crc kubenswrapper[4814]: I0122 05:57:21.344371 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:57:21 crc kubenswrapper[4814]: E0122 05:57:21.345576 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:57:35 crc kubenswrapper[4814]: I0122 05:57:35.343956 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:57:35 crc kubenswrapper[4814]: E0122 05:57:35.344913 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:57:50 crc kubenswrapper[4814]: I0122 05:57:50.344354 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:57:50 crc kubenswrapper[4814]: E0122 05:57:50.345590 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:58:04 crc kubenswrapper[4814]: I0122 05:58:04.363465 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:58:04 crc kubenswrapper[4814]: E0122 05:58:04.366446 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:58:19 crc kubenswrapper[4814]: I0122 05:58:19.343851 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:58:19 crc kubenswrapper[4814]: E0122 05:58:19.344501 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:58:34 crc kubenswrapper[4814]: I0122 05:58:34.349669 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:58:34 crc kubenswrapper[4814]: E0122 05:58:34.350350 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:58:49 crc kubenswrapper[4814]: I0122 05:58:49.345020 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:58:49 crc kubenswrapper[4814]: E0122 05:58:49.346120 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:59:01 crc kubenswrapper[4814]: I0122 05:59:01.345206 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:59:01 crc kubenswrapper[4814]: E0122 05:59:01.346576 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:59:12 crc kubenswrapper[4814]: I0122 05:59:12.344250 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:59:12 crc kubenswrapper[4814]: E0122 05:59:12.344836 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:59:26 crc kubenswrapper[4814]: I0122 05:59:26.344745 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:59:26 crc kubenswrapper[4814]: E0122 05:59:26.345791 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:59:39 crc kubenswrapper[4814]: I0122 05:59:39.013344 4814 generic.go:334] "Generic (PLEG): container finished" podID="ae3fef63-f370-4cba-bb63-be8a09063383" containerID="1db9faf6cfa5efc5af87a41b6083b9264ba3bee82f2713d16c863a412680d7fd" exitCode=0 Jan 22 05:59:39 crc kubenswrapper[4814]: I0122 05:59:39.013434 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" event={"ID":"ae3fef63-f370-4cba-bb63-be8a09063383","Type":"ContainerDied","Data":"1db9faf6cfa5efc5af87a41b6083b9264ba3bee82f2713d16c863a412680d7fd"} Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.443652 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.588550 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-libvirt-secret-0\") pod \"ae3fef63-f370-4cba-bb63-be8a09063383\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.588770 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-inventory\") pod \"ae3fef63-f370-4cba-bb63-be8a09063383\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.588805 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-libvirt-combined-ca-bundle\") pod \"ae3fef63-f370-4cba-bb63-be8a09063383\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.588838 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-ssh-key-openstack-edpm-ipam\") pod \"ae3fef63-f370-4cba-bb63-be8a09063383\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.588898 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rx79g\" (UniqueName: \"kubernetes.io/projected/ae3fef63-f370-4cba-bb63-be8a09063383-kube-api-access-rx79g\") pod \"ae3fef63-f370-4cba-bb63-be8a09063383\" (UID: \"ae3fef63-f370-4cba-bb63-be8a09063383\") " Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.598774 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae3fef63-f370-4cba-bb63-be8a09063383-kube-api-access-rx79g" (OuterVolumeSpecName: "kube-api-access-rx79g") pod "ae3fef63-f370-4cba-bb63-be8a09063383" (UID: "ae3fef63-f370-4cba-bb63-be8a09063383"). InnerVolumeSpecName "kube-api-access-rx79g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.617405 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "ae3fef63-f370-4cba-bb63-be8a09063383" (UID: "ae3fef63-f370-4cba-bb63-be8a09063383"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.618952 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "ae3fef63-f370-4cba-bb63-be8a09063383" (UID: "ae3fef63-f370-4cba-bb63-be8a09063383"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.619489 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "ae3fef63-f370-4cba-bb63-be8a09063383" (UID: "ae3fef63-f370-4cba-bb63-be8a09063383"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.619848 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-inventory" (OuterVolumeSpecName: "inventory") pod "ae3fef63-f370-4cba-bb63-be8a09063383" (UID: "ae3fef63-f370-4cba-bb63-be8a09063383"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.691262 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.691313 4814 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.691335 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.691353 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rx79g\" (UniqueName: \"kubernetes.io/projected/ae3fef63-f370-4cba-bb63-be8a09063383-kube-api-access-rx79g\") on node \"crc\" DevicePath \"\"" Jan 22 05:59:40 crc kubenswrapper[4814]: I0122 05:59:40.691371 4814 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ae3fef63-f370-4cba-bb63-be8a09063383-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.030233 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" event={"ID":"ae3fef63-f370-4cba-bb63-be8a09063383","Type":"ContainerDied","Data":"72b0d0c8bc7f5e8a5b423436bca291fb0d4a1ba88058abbb779539221b6a69df"} Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.030267 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-mqxg8" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.030270 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72b0d0c8bc7f5e8a5b423436bca291fb0d4a1ba88058abbb779539221b6a69df" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.242851 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4"] Jan 22 05:59:41 crc kubenswrapper[4814]: E0122 05:59:41.243598 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a0a4763-f5ac-45c5-9948-c365ac5a4e15" containerName="extract-utilities" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.248461 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a0a4763-f5ac-45c5-9948-c365ac5a4e15" containerName="extract-utilities" Jan 22 05:59:41 crc kubenswrapper[4814]: E0122 05:59:41.248496 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a0a4763-f5ac-45c5-9948-c365ac5a4e15" containerName="registry-server" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.248504 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a0a4763-f5ac-45c5-9948-c365ac5a4e15" containerName="registry-server" Jan 22 05:59:41 crc kubenswrapper[4814]: E0122 05:59:41.248517 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a0a4763-f5ac-45c5-9948-c365ac5a4e15" containerName="extract-content" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.248530 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a0a4763-f5ac-45c5-9948-c365ac5a4e15" containerName="extract-content" Jan 22 05:59:41 crc kubenswrapper[4814]: E0122 05:59:41.248554 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5d4cea0-8e5a-48ed-8967-a61a7559552d" containerName="extract-content" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.248561 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5d4cea0-8e5a-48ed-8967-a61a7559552d" containerName="extract-content" Jan 22 05:59:41 crc kubenswrapper[4814]: E0122 05:59:41.248575 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5d4cea0-8e5a-48ed-8967-a61a7559552d" containerName="extract-utilities" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.248581 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5d4cea0-8e5a-48ed-8967-a61a7559552d" containerName="extract-utilities" Jan 22 05:59:41 crc kubenswrapper[4814]: E0122 05:59:41.248604 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5d4cea0-8e5a-48ed-8967-a61a7559552d" containerName="registry-server" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.248609 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5d4cea0-8e5a-48ed-8967-a61a7559552d" containerName="registry-server" Jan 22 05:59:41 crc kubenswrapper[4814]: E0122 05:59:41.248617 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae3fef63-f370-4cba-bb63-be8a09063383" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.248637 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae3fef63-f370-4cba-bb63-be8a09063383" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.248911 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5d4cea0-8e5a-48ed-8967-a61a7559552d" containerName="registry-server" Jan 22 05:59:41 crc 
kubenswrapper[4814]: I0122 05:59:41.248926 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a0a4763-f5ac-45c5-9948-c365ac5a4e15" containerName="registry-server" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.248935 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae3fef63-f370-4cba-bb63-be8a09063383" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.249522 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.251308 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.251967 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.252197 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.252991 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.253259 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.256320 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.259077 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.268251 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4"] Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.303220 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.303302 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.303490 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.303636 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.303676 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.303826 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.303992 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.304060 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.304115 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d827n\" (UniqueName: \"kubernetes.io/projected/ecb3233e-ffca-465f-b74d-ed1f70ae955f-kube-api-access-d827n\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.344271 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:59:41 crc kubenswrapper[4814]: E0122 05:59:41.344567 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.405538 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-cell1-compute-config-1\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.405605 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.405652 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.405675 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d827n\" (UniqueName: \"kubernetes.io/projected/ecb3233e-ffca-465f-b74d-ed1f70ae955f-kube-api-access-d827n\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.405742 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.405778 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.405810 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.405845 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.405864 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-ssh-key-openstack-edpm-ipam\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.407105 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.415985 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.416868 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.417046 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.421095 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.425144 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.425248 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.425474 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.434404 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d827n\" (UniqueName: \"kubernetes.io/projected/ecb3233e-ffca-465f-b74d-ed1f70ae955f-kube-api-access-d827n\") pod \"nova-edpm-deployment-openstack-edpm-ipam-rg9l4\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:41 crc kubenswrapper[4814]: I0122 05:59:41.563506 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 05:59:42 crc kubenswrapper[4814]: I0122 05:59:42.131001 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4"] Jan 22 05:59:43 crc kubenswrapper[4814]: I0122 05:59:43.048571 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" event={"ID":"ecb3233e-ffca-465f-b74d-ed1f70ae955f","Type":"ContainerStarted","Data":"edb79981c6a3c427036f39408686f9b05c2068ecf4509284857212a3bd217388"} Jan 22 05:59:43 crc kubenswrapper[4814]: I0122 05:59:43.049685 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" event={"ID":"ecb3233e-ffca-465f-b74d-ed1f70ae955f","Type":"ContainerStarted","Data":"f051d95858674a2bc6ac48013e9d9a5c1706fcb3e04b831cd8a42c74f7af4855"} Jan 22 05:59:43 crc kubenswrapper[4814]: I0122 05:59:43.071126 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" podStartSLOduration=1.483237972 podStartE2EDuration="2.071111886s" podCreationTimestamp="2026-01-22 05:59:41 +0000 UTC" firstStartedPulling="2026-01-22 05:59:42.137178954 +0000 UTC m=+2468.220667169" lastFinishedPulling="2026-01-22 05:59:42.725052828 +0000 UTC m=+2468.808541083" observedRunningTime="2026-01-22 05:59:43.064899693 +0000 UTC m=+2469.148387908" watchObservedRunningTime="2026-01-22 05:59:43.071111886 +0000 UTC m=+2469.154600101" Jan 22 05:59:52 crc kubenswrapper[4814]: I0122 05:59:52.345839 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 05:59:52 crc kubenswrapper[4814]: E0122 05:59:52.347023 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.163213 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9"] Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.165203 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.168225 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.168555 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.185193 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9"] Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.299275 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7bnv\" (UniqueName: \"kubernetes.io/projected/087abff4-52e5-485f-aa9a-f4d3d607f233-kube-api-access-k7bnv\") pod \"collect-profiles-29484360-w7nh9\" (UID: \"087abff4-52e5-485f-aa9a-f4d3d607f233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.299362 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/087abff4-52e5-485f-aa9a-f4d3d607f233-secret-volume\") pod \"collect-profiles-29484360-w7nh9\" (UID: \"087abff4-52e5-485f-aa9a-f4d3d607f233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.299726 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/087abff4-52e5-485f-aa9a-f4d3d607f233-config-volume\") pod \"collect-profiles-29484360-w7nh9\" (UID: \"087abff4-52e5-485f-aa9a-f4d3d607f233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.403013 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/087abff4-52e5-485f-aa9a-f4d3d607f233-config-volume\") pod \"collect-profiles-29484360-w7nh9\" (UID: \"087abff4-52e5-485f-aa9a-f4d3d607f233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.403998 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/087abff4-52e5-485f-aa9a-f4d3d607f233-config-volume\") pod \"collect-profiles-29484360-w7nh9\" (UID: \"087abff4-52e5-485f-aa9a-f4d3d607f233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.404134 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7bnv\" (UniqueName: \"kubernetes.io/projected/087abff4-52e5-485f-aa9a-f4d3d607f233-kube-api-access-k7bnv\") pod \"collect-profiles-29484360-w7nh9\" (UID: \"087abff4-52e5-485f-aa9a-f4d3d607f233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.404584 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/087abff4-52e5-485f-aa9a-f4d3d607f233-secret-volume\") pod 
\"collect-profiles-29484360-w7nh9\" (UID: \"087abff4-52e5-485f-aa9a-f4d3d607f233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.410054 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/087abff4-52e5-485f-aa9a-f4d3d607f233-secret-volume\") pod \"collect-profiles-29484360-w7nh9\" (UID: \"087abff4-52e5-485f-aa9a-f4d3d607f233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.424674 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7bnv\" (UniqueName: \"kubernetes.io/projected/087abff4-52e5-485f-aa9a-f4d3d607f233-kube-api-access-k7bnv\") pod \"collect-profiles-29484360-w7nh9\" (UID: \"087abff4-52e5-485f-aa9a-f4d3d607f233\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.494446 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" Jan 22 06:00:00 crc kubenswrapper[4814]: I0122 06:00:00.760736 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9"] Jan 22 06:00:01 crc kubenswrapper[4814]: I0122 06:00:01.223189 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" event={"ID":"087abff4-52e5-485f-aa9a-f4d3d607f233","Type":"ContainerStarted","Data":"e7e456a33ad766e2c9f7fd57ba866420bc95989fa475d5ed7642b659a39e5e56"} Jan 22 06:00:01 crc kubenswrapper[4814]: I0122 06:00:01.223508 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" event={"ID":"087abff4-52e5-485f-aa9a-f4d3d607f233","Type":"ContainerStarted","Data":"dd3c7fe8a1df9c6d15dfa82cf7e22cae11c7b359a873a8c1bfe6909b8fdb5198"} Jan 22 06:00:01 crc kubenswrapper[4814]: I0122 06:00:01.247724 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" podStartSLOduration=1.247705711 podStartE2EDuration="1.247705711s" podCreationTimestamp="2026-01-22 06:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:00:01.243144658 +0000 UTC m=+2487.326632873" watchObservedRunningTime="2026-01-22 06:00:01.247705711 +0000 UTC m=+2487.331193926" Jan 22 06:00:02 crc kubenswrapper[4814]: I0122 06:00:02.235979 4814 generic.go:334] "Generic (PLEG): container finished" podID="087abff4-52e5-485f-aa9a-f4d3d607f233" containerID="e7e456a33ad766e2c9f7fd57ba866420bc95989fa475d5ed7642b659a39e5e56" exitCode=0 Jan 22 06:00:02 crc kubenswrapper[4814]: I0122 06:00:02.236034 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" event={"ID":"087abff4-52e5-485f-aa9a-f4d3d607f233","Type":"ContainerDied","Data":"e7e456a33ad766e2c9f7fd57ba866420bc95989fa475d5ed7642b659a39e5e56"} Jan 22 06:00:03 crc kubenswrapper[4814]: I0122 06:00:03.642014 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" Jan 22 06:00:03 crc kubenswrapper[4814]: I0122 06:00:03.806041 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7bnv\" (UniqueName: \"kubernetes.io/projected/087abff4-52e5-485f-aa9a-f4d3d607f233-kube-api-access-k7bnv\") pod \"087abff4-52e5-485f-aa9a-f4d3d607f233\" (UID: \"087abff4-52e5-485f-aa9a-f4d3d607f233\") " Jan 22 06:00:03 crc kubenswrapper[4814]: I0122 06:00:03.806171 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/087abff4-52e5-485f-aa9a-f4d3d607f233-secret-volume\") pod \"087abff4-52e5-485f-aa9a-f4d3d607f233\" (UID: \"087abff4-52e5-485f-aa9a-f4d3d607f233\") " Jan 22 06:00:03 crc kubenswrapper[4814]: I0122 06:00:03.806277 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/087abff4-52e5-485f-aa9a-f4d3d607f233-config-volume\") pod \"087abff4-52e5-485f-aa9a-f4d3d607f233\" (UID: \"087abff4-52e5-485f-aa9a-f4d3d607f233\") " Jan 22 06:00:03 crc kubenswrapper[4814]: I0122 06:00:03.806777 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/087abff4-52e5-485f-aa9a-f4d3d607f233-config-volume" (OuterVolumeSpecName: "config-volume") pod "087abff4-52e5-485f-aa9a-f4d3d607f233" (UID: "087abff4-52e5-485f-aa9a-f4d3d607f233"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:00:03 crc kubenswrapper[4814]: I0122 06:00:03.812001 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/087abff4-52e5-485f-aa9a-f4d3d607f233-kube-api-access-k7bnv" (OuterVolumeSpecName: "kube-api-access-k7bnv") pod "087abff4-52e5-485f-aa9a-f4d3d607f233" (UID: "087abff4-52e5-485f-aa9a-f4d3d607f233"). InnerVolumeSpecName "kube-api-access-k7bnv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:00:03 crc kubenswrapper[4814]: I0122 06:00:03.814882 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/087abff4-52e5-485f-aa9a-f4d3d607f233-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "087abff4-52e5-485f-aa9a-f4d3d607f233" (UID: "087abff4-52e5-485f-aa9a-f4d3d607f233"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:00:03 crc kubenswrapper[4814]: I0122 06:00:03.909013 4814 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/087abff4-52e5-485f-aa9a-f4d3d607f233-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 06:00:03 crc kubenswrapper[4814]: I0122 06:00:03.909045 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7bnv\" (UniqueName: \"kubernetes.io/projected/087abff4-52e5-485f-aa9a-f4d3d607f233-kube-api-access-k7bnv\") on node \"crc\" DevicePath \"\"" Jan 22 06:00:03 crc kubenswrapper[4814]: I0122 06:00:03.909072 4814 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/087abff4-52e5-485f-aa9a-f4d3d607f233-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 06:00:04 crc kubenswrapper[4814]: I0122 06:00:04.259063 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" event={"ID":"087abff4-52e5-485f-aa9a-f4d3d607f233","Type":"ContainerDied","Data":"dd3c7fe8a1df9c6d15dfa82cf7e22cae11c7b359a873a8c1bfe6909b8fdb5198"} Jan 22 06:00:04 crc kubenswrapper[4814]: I0122 06:00:04.259123 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd3c7fe8a1df9c6d15dfa82cf7e22cae11c7b359a873a8c1bfe6909b8fdb5198" Jan 22 06:00:04 crc kubenswrapper[4814]: I0122 06:00:04.259183 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9" Jan 22 06:00:04 crc kubenswrapper[4814]: I0122 06:00:04.324245 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt"] Jan 22 06:00:04 crc kubenswrapper[4814]: I0122 06:00:04.334787 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484315-6vxkt"] Jan 22 06:00:04 crc kubenswrapper[4814]: I0122 06:00:04.355440 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fcca12ae-2952-47fb-b97c-6d913948ae44" path="/var/lib/kubelet/pods/fcca12ae-2952-47fb-b97c-6d913948ae44/volumes" Jan 22 06:00:06 crc kubenswrapper[4814]: I0122 06:00:06.344224 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 06:00:06 crc kubenswrapper[4814]: E0122 06:00:06.344673 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:00:21 crc kubenswrapper[4814]: I0122 06:00:21.344587 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 06:00:21 crc kubenswrapper[4814]: E0122 06:00:21.345808 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.737570 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-knxb6"] Jan 22 06:00:22 crc kubenswrapper[4814]: E0122 06:00:22.738162 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="087abff4-52e5-485f-aa9a-f4d3d607f233" containerName="collect-profiles" Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.738174 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="087abff4-52e5-485f-aa9a-f4d3d607f233" containerName="collect-profiles" Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.738345 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="087abff4-52e5-485f-aa9a-f4d3d607f233" containerName="collect-profiles" Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.739590 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.763409 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-knxb6"] Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.844328 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz4vk\" (UniqueName: \"kubernetes.io/projected/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-kube-api-access-hz4vk\") pod \"redhat-marketplace-knxb6\" (UID: \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\") " pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.844670 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-utilities\") pod \"redhat-marketplace-knxb6\" (UID: \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\") " pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.844841 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-catalog-content\") pod \"redhat-marketplace-knxb6\" (UID: \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\") " pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.946009 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-catalog-content\") pod \"redhat-marketplace-knxb6\" (UID: \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\") " pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.946071 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz4vk\" (UniqueName: \"kubernetes.io/projected/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-kube-api-access-hz4vk\") pod \"redhat-marketplace-knxb6\" (UID: \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\") " pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.946161 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-utilities\") pod 
\"redhat-marketplace-knxb6\" (UID: \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\") " pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.946671 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-utilities\") pod \"redhat-marketplace-knxb6\" (UID: \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\") " pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.946832 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-catalog-content\") pod \"redhat-marketplace-knxb6\" (UID: \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\") " pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:22 crc kubenswrapper[4814]: I0122 06:00:22.965613 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz4vk\" (UniqueName: \"kubernetes.io/projected/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-kube-api-access-hz4vk\") pod \"redhat-marketplace-knxb6\" (UID: \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\") " pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:23 crc kubenswrapper[4814]: I0122 06:00:23.059113 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:23 crc kubenswrapper[4814]: I0122 06:00:23.555142 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-knxb6"] Jan 22 06:00:24 crc kubenswrapper[4814]: I0122 06:00:24.430215 4814 generic.go:334] "Generic (PLEG): container finished" podID="3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" containerID="bf86e66fe5b923447348717205edc5b11598ff69ee1b310e19f4c81e5b2568b1" exitCode=0 Jan 22 06:00:24 crc kubenswrapper[4814]: I0122 06:00:24.430330 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knxb6" event={"ID":"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe","Type":"ContainerDied","Data":"bf86e66fe5b923447348717205edc5b11598ff69ee1b310e19f4c81e5b2568b1"} Jan 22 06:00:24 crc kubenswrapper[4814]: I0122 06:00:24.430625 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knxb6" event={"ID":"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe","Type":"ContainerStarted","Data":"f9f239ee8fa0ad03d8ee4758ef7dbb402edfdc8373b53879184aa03f075ed2eb"} Jan 22 06:00:24 crc kubenswrapper[4814]: I0122 06:00:24.432210 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 06:00:25 crc kubenswrapper[4814]: I0122 06:00:25.439566 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knxb6" event={"ID":"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe","Type":"ContainerStarted","Data":"fb71a62298365877bc8f2fd2315390e622044a7db31e73516f1eb53e3a84920a"} Jan 22 06:00:26 crc kubenswrapper[4814]: I0122 06:00:26.454342 4814 generic.go:334] "Generic (PLEG): container finished" podID="3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" containerID="fb71a62298365877bc8f2fd2315390e622044a7db31e73516f1eb53e3a84920a" exitCode=0 Jan 22 06:00:26 crc kubenswrapper[4814]: I0122 06:00:26.454582 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knxb6" 
event={"ID":"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe","Type":"ContainerDied","Data":"fb71a62298365877bc8f2fd2315390e622044a7db31e73516f1eb53e3a84920a"} Jan 22 06:00:27 crc kubenswrapper[4814]: I0122 06:00:27.466442 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knxb6" event={"ID":"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe","Type":"ContainerStarted","Data":"be10f3dd60c39c2e40a69f40b6aecfbbdb33b14ff5868e934721f47c19229dfe"} Jan 22 06:00:27 crc kubenswrapper[4814]: I0122 06:00:27.488775 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-knxb6" podStartSLOduration=3.053168526 podStartE2EDuration="5.488750297s" podCreationTimestamp="2026-01-22 06:00:22 +0000 UTC" firstStartedPulling="2026-01-22 06:00:24.431980015 +0000 UTC m=+2510.515468230" lastFinishedPulling="2026-01-22 06:00:26.867561776 +0000 UTC m=+2512.951050001" observedRunningTime="2026-01-22 06:00:27.481064437 +0000 UTC m=+2513.564552652" watchObservedRunningTime="2026-01-22 06:00:27.488750297 +0000 UTC m=+2513.572238512" Jan 22 06:00:32 crc kubenswrapper[4814]: I0122 06:00:32.343809 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 06:00:32 crc kubenswrapper[4814]: E0122 06:00:32.344555 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:00:33 crc kubenswrapper[4814]: I0122 06:00:33.060665 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:33 crc kubenswrapper[4814]: I0122 06:00:33.060752 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:33 crc kubenswrapper[4814]: I0122 06:00:33.120336 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:33 crc kubenswrapper[4814]: I0122 06:00:33.569260 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:33 crc kubenswrapper[4814]: I0122 06:00:33.627683 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-knxb6"] Jan 22 06:00:35 crc kubenswrapper[4814]: I0122 06:00:35.649690 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-knxb6" podUID="3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" containerName="registry-server" containerID="cri-o://be10f3dd60c39c2e40a69f40b6aecfbbdb33b14ff5868e934721f47c19229dfe" gracePeriod=2 Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.113576 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.183299 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hz4vk\" (UniqueName: \"kubernetes.io/projected/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-kube-api-access-hz4vk\") pod \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\" (UID: \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\") " Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.183337 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-utilities\") pod \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\" (UID: \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\") " Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.183402 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-catalog-content\") pod \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\" (UID: \"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe\") " Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.185541 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-utilities" (OuterVolumeSpecName: "utilities") pod "3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" (UID: "3fd93fc4-b78b-4c9a-8f04-b95296a82dfe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.190456 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-kube-api-access-hz4vk" (OuterVolumeSpecName: "kube-api-access-hz4vk") pod "3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" (UID: "3fd93fc4-b78b-4c9a-8f04-b95296a82dfe"). InnerVolumeSpecName "kube-api-access-hz4vk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.208576 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" (UID: "3fd93fc4-b78b-4c9a-8f04-b95296a82dfe"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.285662 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hz4vk\" (UniqueName: \"kubernetes.io/projected/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-kube-api-access-hz4vk\") on node \"crc\" DevicePath \"\"" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.285706 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.285726 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.664423 4814 generic.go:334] "Generic (PLEG): container finished" podID="3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" containerID="be10f3dd60c39c2e40a69f40b6aecfbbdb33b14ff5868e934721f47c19229dfe" exitCode=0 Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.664481 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knxb6" event={"ID":"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe","Type":"ContainerDied","Data":"be10f3dd60c39c2e40a69f40b6aecfbbdb33b14ff5868e934721f47c19229dfe"} Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.664520 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knxb6" event={"ID":"3fd93fc4-b78b-4c9a-8f04-b95296a82dfe","Type":"ContainerDied","Data":"f9f239ee8fa0ad03d8ee4758ef7dbb402edfdc8373b53879184aa03f075ed2eb"} Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.664541 4814 scope.go:117] "RemoveContainer" containerID="be10f3dd60c39c2e40a69f40b6aecfbbdb33b14ff5868e934721f47c19229dfe" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.664726 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knxb6" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.705041 4814 scope.go:117] "RemoveContainer" containerID="fb71a62298365877bc8f2fd2315390e622044a7db31e73516f1eb53e3a84920a" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.716003 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-knxb6"] Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.733609 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-knxb6"] Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.755331 4814 scope.go:117] "RemoveContainer" containerID="bf86e66fe5b923447348717205edc5b11598ff69ee1b310e19f4c81e5b2568b1" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.778985 4814 scope.go:117] "RemoveContainer" containerID="be10f3dd60c39c2e40a69f40b6aecfbbdb33b14ff5868e934721f47c19229dfe" Jan 22 06:00:36 crc kubenswrapper[4814]: E0122 06:00:36.779369 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be10f3dd60c39c2e40a69f40b6aecfbbdb33b14ff5868e934721f47c19229dfe\": container with ID starting with be10f3dd60c39c2e40a69f40b6aecfbbdb33b14ff5868e934721f47c19229dfe not found: ID does not exist" containerID="be10f3dd60c39c2e40a69f40b6aecfbbdb33b14ff5868e934721f47c19229dfe" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.779424 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be10f3dd60c39c2e40a69f40b6aecfbbdb33b14ff5868e934721f47c19229dfe"} err="failed to get container status \"be10f3dd60c39c2e40a69f40b6aecfbbdb33b14ff5868e934721f47c19229dfe\": rpc error: code = NotFound desc = could not find container \"be10f3dd60c39c2e40a69f40b6aecfbbdb33b14ff5868e934721f47c19229dfe\": container with ID starting with be10f3dd60c39c2e40a69f40b6aecfbbdb33b14ff5868e934721f47c19229dfe not found: ID does not exist" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.779468 4814 scope.go:117] "RemoveContainer" containerID="fb71a62298365877bc8f2fd2315390e622044a7db31e73516f1eb53e3a84920a" Jan 22 06:00:36 crc kubenswrapper[4814]: E0122 06:00:36.779746 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb71a62298365877bc8f2fd2315390e622044a7db31e73516f1eb53e3a84920a\": container with ID starting with fb71a62298365877bc8f2fd2315390e622044a7db31e73516f1eb53e3a84920a not found: ID does not exist" containerID="fb71a62298365877bc8f2fd2315390e622044a7db31e73516f1eb53e3a84920a" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.779776 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb71a62298365877bc8f2fd2315390e622044a7db31e73516f1eb53e3a84920a"} err="failed to get container status \"fb71a62298365877bc8f2fd2315390e622044a7db31e73516f1eb53e3a84920a\": rpc error: code = NotFound desc = could not find container \"fb71a62298365877bc8f2fd2315390e622044a7db31e73516f1eb53e3a84920a\": container with ID starting with fb71a62298365877bc8f2fd2315390e622044a7db31e73516f1eb53e3a84920a not found: ID does not exist" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.779795 4814 scope.go:117] "RemoveContainer" containerID="bf86e66fe5b923447348717205edc5b11598ff69ee1b310e19f4c81e5b2568b1" Jan 22 06:00:36 crc kubenswrapper[4814]: E0122 06:00:36.780087 4814 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"bf86e66fe5b923447348717205edc5b11598ff69ee1b310e19f4c81e5b2568b1\": container with ID starting with bf86e66fe5b923447348717205edc5b11598ff69ee1b310e19f4c81e5b2568b1 not found: ID does not exist" containerID="bf86e66fe5b923447348717205edc5b11598ff69ee1b310e19f4c81e5b2568b1" Jan 22 06:00:36 crc kubenswrapper[4814]: I0122 06:00:36.780107 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf86e66fe5b923447348717205edc5b11598ff69ee1b310e19f4c81e5b2568b1"} err="failed to get container status \"bf86e66fe5b923447348717205edc5b11598ff69ee1b310e19f4c81e5b2568b1\": rpc error: code = NotFound desc = could not find container \"bf86e66fe5b923447348717205edc5b11598ff69ee1b310e19f4c81e5b2568b1\": container with ID starting with bf86e66fe5b923447348717205edc5b11598ff69ee1b310e19f4c81e5b2568b1 not found: ID does not exist" Jan 22 06:00:38 crc kubenswrapper[4814]: I0122 06:00:38.363590 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" path="/var/lib/kubelet/pods/3fd93fc4-b78b-4c9a-8f04-b95296a82dfe/volumes" Jan 22 06:00:43 crc kubenswrapper[4814]: I0122 06:00:43.546438 4814 scope.go:117] "RemoveContainer" containerID="f58812cb612307406e46c22b6aded9552ae2b24ddb1335bd69d86f74abe6bf49" Jan 22 06:00:44 crc kubenswrapper[4814]: I0122 06:00:44.350021 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 06:00:44 crc kubenswrapper[4814]: E0122 06:00:44.350419 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:00:59 crc kubenswrapper[4814]: I0122 06:00:59.344072 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 06:00:59 crc kubenswrapper[4814]: E0122 06:00:59.345883 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.160516 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29484361-6ntzd"] Jan 22 06:01:00 crc kubenswrapper[4814]: E0122 06:01:00.160868 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" containerName="extract-utilities" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.160884 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" containerName="extract-utilities" Jan 22 06:01:00 crc kubenswrapper[4814]: E0122 06:01:00.160902 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" containerName="extract-content" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.160909 4814 
state_mem.go:107] "Deleted CPUSet assignment" podUID="3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" containerName="extract-content" Jan 22 06:01:00 crc kubenswrapper[4814]: E0122 06:01:00.160927 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" containerName="registry-server" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.160933 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" containerName="registry-server" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.161087 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fd93fc4-b78b-4c9a-8f04-b95296a82dfe" containerName="registry-server" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.161947 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.174249 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29484361-6ntzd"] Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.267539 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-fernet-keys\") pod \"keystone-cron-29484361-6ntzd\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.267915 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vn9zz\" (UniqueName: \"kubernetes.io/projected/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-kube-api-access-vn9zz\") pod \"keystone-cron-29484361-6ntzd\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.267947 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-combined-ca-bundle\") pod \"keystone-cron-29484361-6ntzd\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.268715 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-config-data\") pod \"keystone-cron-29484361-6ntzd\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.370449 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-config-data\") pod \"keystone-cron-29484361-6ntzd\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.370608 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-fernet-keys\") pod \"keystone-cron-29484361-6ntzd\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.370695 4814 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vn9zz\" (UniqueName: \"kubernetes.io/projected/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-kube-api-access-vn9zz\") pod \"keystone-cron-29484361-6ntzd\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.370739 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-combined-ca-bundle\") pod \"keystone-cron-29484361-6ntzd\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.383927 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-fernet-keys\") pod \"keystone-cron-29484361-6ntzd\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.384049 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-combined-ca-bundle\") pod \"keystone-cron-29484361-6ntzd\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.386470 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-config-data\") pod \"keystone-cron-29484361-6ntzd\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.437486 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vn9zz\" (UniqueName: \"kubernetes.io/projected/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-kube-api-access-vn9zz\") pod \"keystone-cron-29484361-6ntzd\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:00 crc kubenswrapper[4814]: I0122 06:01:00.523185 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:01 crc kubenswrapper[4814]: I0122 06:01:01.091205 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29484361-6ntzd"] Jan 22 06:01:02 crc kubenswrapper[4814]: I0122 06:01:02.021173 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29484361-6ntzd" event={"ID":"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc","Type":"ContainerStarted","Data":"60dcab4e0bf9c528ae4071b0860418275315c1fd39fe1d7ebf1e795aed78edf2"} Jan 22 06:01:02 crc kubenswrapper[4814]: I0122 06:01:02.021231 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29484361-6ntzd" event={"ID":"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc","Type":"ContainerStarted","Data":"dfb4b9c2dd31d7e7c8114580d3566d3424d5d71e2ccee5c7fdd04b400eef5a24"} Jan 22 06:01:02 crc kubenswrapper[4814]: I0122 06:01:02.043478 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29484361-6ntzd" podStartSLOduration=2.043461921 podStartE2EDuration="2.043461921s" podCreationTimestamp="2026-01-22 06:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:01:02.039127305 +0000 UTC m=+2548.122615530" watchObservedRunningTime="2026-01-22 06:01:02.043461921 +0000 UTC m=+2548.126950136" Jan 22 06:01:04 crc kubenswrapper[4814]: I0122 06:01:04.046873 4814 generic.go:334] "Generic (PLEG): container finished" podID="4c1c33f4-8636-4012-b4d4-9d7afb65a5bc" containerID="60dcab4e0bf9c528ae4071b0860418275315c1fd39fe1d7ebf1e795aed78edf2" exitCode=0 Jan 22 06:01:04 crc kubenswrapper[4814]: I0122 06:01:04.046945 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29484361-6ntzd" event={"ID":"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc","Type":"ContainerDied","Data":"60dcab4e0bf9c528ae4071b0860418275315c1fd39fe1d7ebf1e795aed78edf2"} Jan 22 06:01:05 crc kubenswrapper[4814]: I0122 06:01:05.417849 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:05 crc kubenswrapper[4814]: I0122 06:01:05.573428 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-combined-ca-bundle\") pod \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " Jan 22 06:01:05 crc kubenswrapper[4814]: I0122 06:01:05.573547 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vn9zz\" (UniqueName: \"kubernetes.io/projected/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-kube-api-access-vn9zz\") pod \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " Jan 22 06:01:05 crc kubenswrapper[4814]: I0122 06:01:05.573580 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-config-data\") pod \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " Jan 22 06:01:05 crc kubenswrapper[4814]: I0122 06:01:05.573654 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-fernet-keys\") pod \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\" (UID: \"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc\") " Jan 22 06:01:05 crc kubenswrapper[4814]: I0122 06:01:05.584411 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "4c1c33f4-8636-4012-b4d4-9d7afb65a5bc" (UID: "4c1c33f4-8636-4012-b4d4-9d7afb65a5bc"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:01:05 crc kubenswrapper[4814]: I0122 06:01:05.610840 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-kube-api-access-vn9zz" (OuterVolumeSpecName: "kube-api-access-vn9zz") pod "4c1c33f4-8636-4012-b4d4-9d7afb65a5bc" (UID: "4c1c33f4-8636-4012-b4d4-9d7afb65a5bc"). InnerVolumeSpecName "kube-api-access-vn9zz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:01:05 crc kubenswrapper[4814]: I0122 06:01:05.630751 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c1c33f4-8636-4012-b4d4-9d7afb65a5bc" (UID: "4c1c33f4-8636-4012-b4d4-9d7afb65a5bc"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:01:05 crc kubenswrapper[4814]: I0122 06:01:05.676720 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:01:05 crc kubenswrapper[4814]: I0122 06:01:05.676839 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vn9zz\" (UniqueName: \"kubernetes.io/projected/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-kube-api-access-vn9zz\") on node \"crc\" DevicePath \"\"" Jan 22 06:01:05 crc kubenswrapper[4814]: I0122 06:01:05.676898 4814 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 22 06:01:05 crc kubenswrapper[4814]: I0122 06:01:05.689769 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-config-data" (OuterVolumeSpecName: "config-data") pod "4c1c33f4-8636-4012-b4d4-9d7afb65a5bc" (UID: "4c1c33f4-8636-4012-b4d4-9d7afb65a5bc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:01:05 crc kubenswrapper[4814]: I0122 06:01:05.779164 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c1c33f4-8636-4012-b4d4-9d7afb65a5bc-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:01:06 crc kubenswrapper[4814]: I0122 06:01:06.063957 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29484361-6ntzd" event={"ID":"4c1c33f4-8636-4012-b4d4-9d7afb65a5bc","Type":"ContainerDied","Data":"dfb4b9c2dd31d7e7c8114580d3566d3424d5d71e2ccee5c7fdd04b400eef5a24"} Jan 22 06:01:06 crc kubenswrapper[4814]: I0122 06:01:06.064274 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dfb4b9c2dd31d7e7c8114580d3566d3424d5d71e2ccee5c7fdd04b400eef5a24" Jan 22 06:01:06 crc kubenswrapper[4814]: I0122 06:01:06.063996 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29484361-6ntzd" Jan 22 06:01:10 crc kubenswrapper[4814]: I0122 06:01:10.385746 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 06:01:10 crc kubenswrapper[4814]: E0122 06:01:10.386378 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:01:23 crc kubenswrapper[4814]: I0122 06:01:23.344478 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 06:01:24 crc kubenswrapper[4814]: I0122 06:01:24.269050 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"88cd0ee7919420de852346364f8a39180c165737b308f2deb64717e85eeb9db9"} Jan 22 06:02:31 crc kubenswrapper[4814]: I0122 06:02:31.037797 4814 generic.go:334] "Generic (PLEG): container finished" podID="ecb3233e-ffca-465f-b74d-ed1f70ae955f" containerID="edb79981c6a3c427036f39408686f9b05c2068ecf4509284857212a3bd217388" exitCode=0 Jan 22 06:02:31 crc kubenswrapper[4814]: I0122 06:02:31.037841 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" event={"ID":"ecb3233e-ffca-465f-b74d-ed1f70ae955f","Type":"ContainerDied","Data":"edb79981c6a3c427036f39408686f9b05c2068ecf4509284857212a3bd217388"} Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.536832 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.694071 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-combined-ca-bundle\") pod \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.694155 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-extra-config-0\") pod \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.694198 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-cell1-compute-config-1\") pod \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.694285 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-inventory\") pod \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.694328 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d827n\" (UniqueName: \"kubernetes.io/projected/ecb3233e-ffca-465f-b74d-ed1f70ae955f-kube-api-access-d827n\") pod \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.694400 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-ssh-key-openstack-edpm-ipam\") pod \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.694431 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-migration-ssh-key-0\") pod \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.694473 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-migration-ssh-key-1\") pod \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.694530 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-cell1-compute-config-0\") pod \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\" (UID: \"ecb3233e-ffca-465f-b74d-ed1f70ae955f\") " Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.712138 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "ecb3233e-ffca-465f-b74d-ed1f70ae955f" (UID: "ecb3233e-ffca-465f-b74d-ed1f70ae955f"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.715507 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecb3233e-ffca-465f-b74d-ed1f70ae955f-kube-api-access-d827n" (OuterVolumeSpecName: "kube-api-access-d827n") pod "ecb3233e-ffca-465f-b74d-ed1f70ae955f" (UID: "ecb3233e-ffca-465f-b74d-ed1f70ae955f"). InnerVolumeSpecName "kube-api-access-d827n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.728014 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "ecb3233e-ffca-465f-b74d-ed1f70ae955f" (UID: "ecb3233e-ffca-465f-b74d-ed1f70ae955f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.735072 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "ecb3233e-ffca-465f-b74d-ed1f70ae955f" (UID: "ecb3233e-ffca-465f-b74d-ed1f70ae955f"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.740777 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "ecb3233e-ffca-465f-b74d-ed1f70ae955f" (UID: "ecb3233e-ffca-465f-b74d-ed1f70ae955f"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.755151 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-inventory" (OuterVolumeSpecName: "inventory") pod "ecb3233e-ffca-465f-b74d-ed1f70ae955f" (UID: "ecb3233e-ffca-465f-b74d-ed1f70ae955f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.755686 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "ecb3233e-ffca-465f-b74d-ed1f70ae955f" (UID: "ecb3233e-ffca-465f-b74d-ed1f70ae955f"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.758111 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "ecb3233e-ffca-465f-b74d-ed1f70ae955f" (UID: "ecb3233e-ffca-465f-b74d-ed1f70ae955f"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.761587 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "ecb3233e-ffca-465f-b74d-ed1f70ae955f" (UID: "ecb3233e-ffca-465f-b74d-ed1f70ae955f"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.796975 4814 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.797005 4814 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.797015 4814 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.797025 4814 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.797035 4814 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.797043 4814 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.797052 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-inventory\") on node \"crc\" DevicePath \"\"" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.797062 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d827n\" (UniqueName: \"kubernetes.io/projected/ecb3233e-ffca-465f-b74d-ed1f70ae955f-kube-api-access-d827n\") on node \"crc\" DevicePath \"\"" Jan 22 06:02:32 crc kubenswrapper[4814]: I0122 06:02:32.797069 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ecb3233e-ffca-465f-b74d-ed1f70ae955f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.077717 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" event={"ID":"ecb3233e-ffca-465f-b74d-ed1f70ae955f","Type":"ContainerDied","Data":"f051d95858674a2bc6ac48013e9d9a5c1706fcb3e04b831cd8a42c74f7af4855"} Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.077783 4814 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="f051d95858674a2bc6ac48013e9d9a5c1706fcb3e04b831cd8a42c74f7af4855" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.077796 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-rg9l4" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.196275 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg"] Jan 22 06:02:33 crc kubenswrapper[4814]: E0122 06:02:33.196800 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecb3233e-ffca-465f-b74d-ed1f70ae955f" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.196823 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecb3233e-ffca-465f-b74d-ed1f70ae955f" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 22 06:02:33 crc kubenswrapper[4814]: E0122 06:02:33.196842 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c1c33f4-8636-4012-b4d4-9d7afb65a5bc" containerName="keystone-cron" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.196853 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c1c33f4-8636-4012-b4d4-9d7afb65a5bc" containerName="keystone-cron" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.197157 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c1c33f4-8636-4012-b4d4-9d7afb65a5bc" containerName="keystone-cron" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.197199 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecb3233e-ffca-465f-b74d-ed1f70ae955f" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.198131 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.204014 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.204548 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.205001 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.205496 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.210377 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rvwf8" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.215438 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg"] Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.305783 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.305856 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.305888 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.305913 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96k8m\" (UniqueName: \"kubernetes.io/projected/14bf03da-292e-4b41-99b2-2410b5f006f4-kube-api-access-96k8m\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.306021 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc 
kubenswrapper[4814]: I0122 06:02:33.306074 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.306228 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.408285 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.408378 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.408476 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.408531 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.408552 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.408573 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96k8m\" (UniqueName: \"kubernetes.io/projected/14bf03da-292e-4b41-99b2-2410b5f006f4-kube-api-access-96k8m\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.408662 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.413246 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.413343 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.415165 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.416109 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.416741 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.418343 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.430283 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96k8m\" (UniqueName: 
\"kubernetes.io/projected/14bf03da-292e-4b41-99b2-2410b5f006f4-kube-api-access-96k8m\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-47bxg\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:33 crc kubenswrapper[4814]: I0122 06:02:33.559513 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" Jan 22 06:02:34 crc kubenswrapper[4814]: I0122 06:02:34.222244 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg"] Jan 22 06:02:35 crc kubenswrapper[4814]: I0122 06:02:35.114941 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" event={"ID":"14bf03da-292e-4b41-99b2-2410b5f006f4","Type":"ContainerStarted","Data":"ca1506155849ccd93e944a3b46b90823c8fc41be5394f58d1e330c2139b9ffaa"} Jan 22 06:02:36 crc kubenswrapper[4814]: I0122 06:02:36.126836 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" event={"ID":"14bf03da-292e-4b41-99b2-2410b5f006f4","Type":"ContainerStarted","Data":"d5da2d4fe066d331bd4fb5f22511fdf85c62e70c098afc3f586f2d020a629503"} Jan 22 06:02:36 crc kubenswrapper[4814]: I0122 06:02:36.162104 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" podStartSLOduration=2.375771288 podStartE2EDuration="3.162081638s" podCreationTimestamp="2026-01-22 06:02:33 +0000 UTC" firstStartedPulling="2026-01-22 06:02:34.204604264 +0000 UTC m=+2640.288092489" lastFinishedPulling="2026-01-22 06:02:34.990914604 +0000 UTC m=+2641.074402839" observedRunningTime="2026-01-22 06:02:36.154241763 +0000 UTC m=+2642.237729998" watchObservedRunningTime="2026-01-22 06:02:36.162081638 +0000 UTC m=+2642.245569873" Jan 22 06:03:49 crc kubenswrapper[4814]: I0122 06:03:49.614541 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:03:49 crc kubenswrapper[4814]: I0122 06:03:49.615276 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:03:53 crc kubenswrapper[4814]: I0122 06:03:53.592790 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mjmbw"] Jan 22 06:03:53 crc kubenswrapper[4814]: I0122 06:03:53.597143 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mjmbw" Jan 22 06:03:53 crc kubenswrapper[4814]: I0122 06:03:53.606120 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mjmbw"] Jan 22 06:03:53 crc kubenswrapper[4814]: I0122 06:03:53.771599 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-catalog-content\") pod \"redhat-operators-mjmbw\" (UID: \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\") " pod="openshift-marketplace/redhat-operators-mjmbw" Jan 22 06:03:53 crc kubenswrapper[4814]: I0122 06:03:53.771858 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-utilities\") pod \"redhat-operators-mjmbw\" (UID: \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\") " pod="openshift-marketplace/redhat-operators-mjmbw" Jan 22 06:03:53 crc kubenswrapper[4814]: I0122 06:03:53.772153 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtbww\" (UniqueName: \"kubernetes.io/projected/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-kube-api-access-gtbww\") pod \"redhat-operators-mjmbw\" (UID: \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\") " pod="openshift-marketplace/redhat-operators-mjmbw" Jan 22 06:03:53 crc kubenswrapper[4814]: I0122 06:03:53.874019 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtbww\" (UniqueName: \"kubernetes.io/projected/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-kube-api-access-gtbww\") pod \"redhat-operators-mjmbw\" (UID: \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\") " pod="openshift-marketplace/redhat-operators-mjmbw" Jan 22 06:03:53 crc kubenswrapper[4814]: I0122 06:03:53.874680 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-catalog-content\") pod \"redhat-operators-mjmbw\" (UID: \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\") " pod="openshift-marketplace/redhat-operators-mjmbw" Jan 22 06:03:53 crc kubenswrapper[4814]: I0122 06:03:53.874816 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-utilities\") pod \"redhat-operators-mjmbw\" (UID: \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\") " pod="openshift-marketplace/redhat-operators-mjmbw" Jan 22 06:03:53 crc kubenswrapper[4814]: I0122 06:03:53.875101 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-catalog-content\") pod \"redhat-operators-mjmbw\" (UID: \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\") " pod="openshift-marketplace/redhat-operators-mjmbw" Jan 22 06:03:53 crc kubenswrapper[4814]: I0122 06:03:53.875389 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-utilities\") pod \"redhat-operators-mjmbw\" (UID: \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\") " pod="openshift-marketplace/redhat-operators-mjmbw" Jan 22 06:03:53 crc kubenswrapper[4814]: I0122 06:03:53.892502 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-gtbww\" (UniqueName: \"kubernetes.io/projected/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-kube-api-access-gtbww\") pod \"redhat-operators-mjmbw\" (UID: \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\") " pod="openshift-marketplace/redhat-operators-mjmbw" Jan 22 06:03:53 crc kubenswrapper[4814]: I0122 06:03:53.922824 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mjmbw" Jan 22 06:03:54 crc kubenswrapper[4814]: I0122 06:03:54.417278 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mjmbw"] Jan 22 06:03:54 crc kubenswrapper[4814]: I0122 06:03:54.935569 4814 generic.go:334] "Generic (PLEG): container finished" podID="078f1e1f-dda0-4ee0-898b-f775aa5f0f50" containerID="06d91e9505e1fa0875334f1c0cde785b6b966e0fcd11949be112f9466206c73b" exitCode=0 Jan 22 06:03:54 crc kubenswrapper[4814]: I0122 06:03:54.935785 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjmbw" event={"ID":"078f1e1f-dda0-4ee0-898b-f775aa5f0f50","Type":"ContainerDied","Data":"06d91e9505e1fa0875334f1c0cde785b6b966e0fcd11949be112f9466206c73b"} Jan 22 06:03:54 crc kubenswrapper[4814]: I0122 06:03:54.935904 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjmbw" event={"ID":"078f1e1f-dda0-4ee0-898b-f775aa5f0f50","Type":"ContainerStarted","Data":"a896b843ce8eac84d7b682bd741242a68d4ebdfb4db2e9a078a82a30b9ccede8"} Jan 22 06:03:56 crc kubenswrapper[4814]: I0122 06:03:56.953574 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjmbw" event={"ID":"078f1e1f-dda0-4ee0-898b-f775aa5f0f50","Type":"ContainerStarted","Data":"4b6de21fcc048c2800184f67f6c7d4d86aeee2994e1b0a4437c71756fda99195"} Jan 22 06:03:59 crc kubenswrapper[4814]: I0122 06:03:59.984703 4814 generic.go:334] "Generic (PLEG): container finished" podID="078f1e1f-dda0-4ee0-898b-f775aa5f0f50" containerID="4b6de21fcc048c2800184f67f6c7d4d86aeee2994e1b0a4437c71756fda99195" exitCode=0 Jan 22 06:03:59 crc kubenswrapper[4814]: I0122 06:03:59.984768 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjmbw" event={"ID":"078f1e1f-dda0-4ee0-898b-f775aa5f0f50","Type":"ContainerDied","Data":"4b6de21fcc048c2800184f67f6c7d4d86aeee2994e1b0a4437c71756fda99195"} Jan 22 06:04:00 crc kubenswrapper[4814]: I0122 06:04:00.994763 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjmbw" event={"ID":"078f1e1f-dda0-4ee0-898b-f775aa5f0f50","Type":"ContainerStarted","Data":"4aaad5a50284434e7941a58fb34e0b37a863807f724836d4078583e75401622c"} Jan 22 06:04:01 crc kubenswrapper[4814]: I0122 06:04:01.013462 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mjmbw" podStartSLOduration=2.530312504 podStartE2EDuration="8.01344501s" podCreationTimestamp="2026-01-22 06:03:53 +0000 UTC" firstStartedPulling="2026-01-22 06:03:54.936822174 +0000 UTC m=+2721.020310379" lastFinishedPulling="2026-01-22 06:04:00.41995465 +0000 UTC m=+2726.503442885" observedRunningTime="2026-01-22 06:04:01.010523238 +0000 UTC m=+2727.094011453" watchObservedRunningTime="2026-01-22 06:04:01.01344501 +0000 UTC m=+2727.096933225" Jan 22 06:04:03 crc kubenswrapper[4814]: I0122 06:04:03.924448 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mjmbw" Jan 22 
06:04:03 crc kubenswrapper[4814]: I0122 06:04:03.925604 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mjmbw"
Jan 22 06:04:04 crc kubenswrapper[4814]: I0122 06:04:04.971314 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mjmbw" podUID="078f1e1f-dda0-4ee0-898b-f775aa5f0f50" containerName="registry-server" probeResult="failure" output=<
Jan 22 06:04:04 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s
Jan 22 06:04:04 crc kubenswrapper[4814]: >
Jan 22 06:04:14 crc kubenswrapper[4814]: I0122 06:04:13.999743 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mjmbw"
Jan 22 06:04:14 crc kubenswrapper[4814]: I0122 06:04:14.075158 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mjmbw"
Jan 22 06:04:14 crc kubenswrapper[4814]: I0122 06:04:14.247745 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mjmbw"]
Jan 22 06:04:15 crc kubenswrapper[4814]: I0122 06:04:15.132368 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mjmbw" podUID="078f1e1f-dda0-4ee0-898b-f775aa5f0f50" containerName="registry-server" containerID="cri-o://4aaad5a50284434e7941a58fb34e0b37a863807f724836d4078583e75401622c" gracePeriod=2
Jan 22 06:04:15 crc kubenswrapper[4814]: I0122 06:04:15.614086 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mjmbw"
Jan 22 06:04:15 crc kubenswrapper[4814]: I0122 06:04:15.778087 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtbww\" (UniqueName: \"kubernetes.io/projected/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-kube-api-access-gtbww\") pod \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\" (UID: \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\") "
Jan 22 06:04:15 crc kubenswrapper[4814]: I0122 06:04:15.778361 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-utilities\") pod \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\" (UID: \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\") "
Jan 22 06:04:15 crc kubenswrapper[4814]: I0122 06:04:15.778424 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-catalog-content\") pod \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\" (UID: \"078f1e1f-dda0-4ee0-898b-f775aa5f0f50\") "
Jan 22 06:04:15 crc kubenswrapper[4814]: I0122 06:04:15.779610 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-utilities" (OuterVolumeSpecName: "utilities") pod "078f1e1f-dda0-4ee0-898b-f775aa5f0f50" (UID: "078f1e1f-dda0-4ee0-898b-f775aa5f0f50"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:04:15 crc kubenswrapper[4814]: I0122 06:04:15.792667 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-kube-api-access-gtbww" (OuterVolumeSpecName: "kube-api-access-gtbww") pod "078f1e1f-dda0-4ee0-898b-f775aa5f0f50" (UID: "078f1e1f-dda0-4ee0-898b-f775aa5f0f50"). InnerVolumeSpecName "kube-api-access-gtbww". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:04:15 crc kubenswrapper[4814]: I0122 06:04:15.889856 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtbww\" (UniqueName: \"kubernetes.io/projected/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-kube-api-access-gtbww\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:15 crc kubenswrapper[4814]: I0122 06:04:15.889914 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:15 crc kubenswrapper[4814]: I0122 06:04:15.969878 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "078f1e1f-dda0-4ee0-898b-f775aa5f0f50" (UID: "078f1e1f-dda0-4ee0-898b-f775aa5f0f50"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:04:15 crc kubenswrapper[4814]: I0122 06:04:15.991656 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/078f1e1f-dda0-4ee0-898b-f775aa5f0f50-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.146113 4814 generic.go:334] "Generic (PLEG): container finished" podID="078f1e1f-dda0-4ee0-898b-f775aa5f0f50" containerID="4aaad5a50284434e7941a58fb34e0b37a863807f724836d4078583e75401622c" exitCode=0
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.146162 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjmbw" event={"ID":"078f1e1f-dda0-4ee0-898b-f775aa5f0f50","Type":"ContainerDied","Data":"4aaad5a50284434e7941a58fb34e0b37a863807f724836d4078583e75401622c"}
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.146195 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjmbw" event={"ID":"078f1e1f-dda0-4ee0-898b-f775aa5f0f50","Type":"ContainerDied","Data":"a896b843ce8eac84d7b682bd741242a68d4ebdfb4db2e9a078a82a30b9ccede8"}
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.146214 4814 scope.go:117] "RemoveContainer" containerID="4aaad5a50284434e7941a58fb34e0b37a863807f724836d4078583e75401622c"
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.146367 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mjmbw"
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.195646 4814 scope.go:117] "RemoveContainer" containerID="4b6de21fcc048c2800184f67f6c7d4d86aeee2994e1b0a4437c71756fda99195"
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.203230 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mjmbw"]
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.216928 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mjmbw"]
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.260226 4814 scope.go:117] "RemoveContainer" containerID="06d91e9505e1fa0875334f1c0cde785b6b966e0fcd11949be112f9466206c73b"
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.286644 4814 scope.go:117] "RemoveContainer" containerID="4aaad5a50284434e7941a58fb34e0b37a863807f724836d4078583e75401622c"
Jan 22 06:04:16 crc kubenswrapper[4814]: E0122 06:04:16.287206 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4aaad5a50284434e7941a58fb34e0b37a863807f724836d4078583e75401622c\": container with ID starting with 4aaad5a50284434e7941a58fb34e0b37a863807f724836d4078583e75401622c not found: ID does not exist" containerID="4aaad5a50284434e7941a58fb34e0b37a863807f724836d4078583e75401622c"
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.287241 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aaad5a50284434e7941a58fb34e0b37a863807f724836d4078583e75401622c"} err="failed to get container status \"4aaad5a50284434e7941a58fb34e0b37a863807f724836d4078583e75401622c\": rpc error: code = NotFound desc = could not find container \"4aaad5a50284434e7941a58fb34e0b37a863807f724836d4078583e75401622c\": container with ID starting with 4aaad5a50284434e7941a58fb34e0b37a863807f724836d4078583e75401622c not found: ID does not exist"
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.287262 4814 scope.go:117] "RemoveContainer" containerID="4b6de21fcc048c2800184f67f6c7d4d86aeee2994e1b0a4437c71756fda99195"
Jan 22 06:04:16 crc kubenswrapper[4814]: E0122 06:04:16.287857 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b6de21fcc048c2800184f67f6c7d4d86aeee2994e1b0a4437c71756fda99195\": container with ID starting with 4b6de21fcc048c2800184f67f6c7d4d86aeee2994e1b0a4437c71756fda99195 not found: ID does not exist" containerID="4b6de21fcc048c2800184f67f6c7d4d86aeee2994e1b0a4437c71756fda99195"
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.287961 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b6de21fcc048c2800184f67f6c7d4d86aeee2994e1b0a4437c71756fda99195"} err="failed to get container status \"4b6de21fcc048c2800184f67f6c7d4d86aeee2994e1b0a4437c71756fda99195\": rpc error: code = NotFound desc = could not find container \"4b6de21fcc048c2800184f67f6c7d4d86aeee2994e1b0a4437c71756fda99195\": container with ID starting with 4b6de21fcc048c2800184f67f6c7d4d86aeee2994e1b0a4437c71756fda99195 not found: ID does not exist"
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.288016 4814 scope.go:117] "RemoveContainer" containerID="06d91e9505e1fa0875334f1c0cde785b6b966e0fcd11949be112f9466206c73b"
Jan 22 06:04:16 crc kubenswrapper[4814]: E0122 06:04:16.288351 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06d91e9505e1fa0875334f1c0cde785b6b966e0fcd11949be112f9466206c73b\": container with ID starting with 06d91e9505e1fa0875334f1c0cde785b6b966e0fcd11949be112f9466206c73b not found: ID does not exist" containerID="06d91e9505e1fa0875334f1c0cde785b6b966e0fcd11949be112f9466206c73b"
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.288381 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06d91e9505e1fa0875334f1c0cde785b6b966e0fcd11949be112f9466206c73b"} err="failed to get container status \"06d91e9505e1fa0875334f1c0cde785b6b966e0fcd11949be112f9466206c73b\": rpc error: code = NotFound desc = could not find container \"06d91e9505e1fa0875334f1c0cde785b6b966e0fcd11949be112f9466206c73b\": container with ID starting with 06d91e9505e1fa0875334f1c0cde785b6b966e0fcd11949be112f9466206c73b not found: ID does not exist"
Jan 22 06:04:16 crc kubenswrapper[4814]: I0122 06:04:16.354041 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="078f1e1f-dda0-4ee0-898b-f775aa5f0f50" path="/var/lib/kubelet/pods/078f1e1f-dda0-4ee0-898b-f775aa5f0f50/volumes"
Jan 22 06:04:19 crc kubenswrapper[4814]: I0122 06:04:19.613565 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:04:19 crc kubenswrapper[4814]: I0122 06:04:19.614164 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:04:49 crc kubenswrapper[4814]: I0122 06:04:49.613388 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:04:49 crc kubenswrapper[4814]: I0122 06:04:49.613826 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:04:49 crc kubenswrapper[4814]: I0122 06:04:49.613868 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg"
Jan 22 06:04:49 crc kubenswrapper[4814]: I0122 06:04:49.614546 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"88cd0ee7919420de852346364f8a39180c165737b308f2deb64717e85eeb9db9"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 06:04:49 crc kubenswrapper[4814]: I0122 06:04:49.614593 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://88cd0ee7919420de852346364f8a39180c165737b308f2deb64717e85eeb9db9" gracePeriod=600
podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://88cd0ee7919420de852346364f8a39180c165737b308f2deb64717e85eeb9db9" gracePeriod=600 Jan 22 06:04:50 crc kubenswrapper[4814]: I0122 06:04:50.485044 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="88cd0ee7919420de852346364f8a39180c165737b308f2deb64717e85eeb9db9" exitCode=0 Jan 22 06:04:50 crc kubenswrapper[4814]: I0122 06:04:50.485079 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"88cd0ee7919420de852346364f8a39180c165737b308f2deb64717e85eeb9db9"} Jan 22 06:04:50 crc kubenswrapper[4814]: I0122 06:04:50.485496 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101"} Jan 22 06:04:50 crc kubenswrapper[4814]: I0122 06:04:50.485513 4814 scope.go:117] "RemoveContainer" containerID="388151db9ee494357006d90cf48bc23e7800b58337e9ef0ebbe1594fe0bdb3e0" Jan 22 06:05:33 crc kubenswrapper[4814]: I0122 06:05:33.885838 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-czh2f"] Jan 22 06:05:33 crc kubenswrapper[4814]: E0122 06:05:33.887170 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="078f1e1f-dda0-4ee0-898b-f775aa5f0f50" containerName="registry-server" Jan 22 06:05:33 crc kubenswrapper[4814]: I0122 06:05:33.887193 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="078f1e1f-dda0-4ee0-898b-f775aa5f0f50" containerName="registry-server" Jan 22 06:05:33 crc kubenswrapper[4814]: E0122 06:05:33.887236 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="078f1e1f-dda0-4ee0-898b-f775aa5f0f50" containerName="extract-utilities" Jan 22 06:05:33 crc kubenswrapper[4814]: I0122 06:05:33.887250 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="078f1e1f-dda0-4ee0-898b-f775aa5f0f50" containerName="extract-utilities" Jan 22 06:05:33 crc kubenswrapper[4814]: E0122 06:05:33.887270 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="078f1e1f-dda0-4ee0-898b-f775aa5f0f50" containerName="extract-content" Jan 22 06:05:33 crc kubenswrapper[4814]: I0122 06:05:33.887284 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="078f1e1f-dda0-4ee0-898b-f775aa5f0f50" containerName="extract-content" Jan 22 06:05:33 crc kubenswrapper[4814]: I0122 06:05:33.887717 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="078f1e1f-dda0-4ee0-898b-f775aa5f0f50" containerName="registry-server" Jan 22 06:05:33 crc kubenswrapper[4814]: I0122 06:05:33.890221 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:33 crc kubenswrapper[4814]: I0122 06:05:33.918451 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-czh2f"] Jan 22 06:05:34 crc kubenswrapper[4814]: I0122 06:05:34.050024 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dea91716-8929-46a6-a99a-50eef8fc9b34-catalog-content\") pod \"community-operators-czh2f\" (UID: \"dea91716-8929-46a6-a99a-50eef8fc9b34\") " pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:34 crc kubenswrapper[4814]: I0122 06:05:34.051214 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dss2c\" (UniqueName: \"kubernetes.io/projected/dea91716-8929-46a6-a99a-50eef8fc9b34-kube-api-access-dss2c\") pod \"community-operators-czh2f\" (UID: \"dea91716-8929-46a6-a99a-50eef8fc9b34\") " pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:34 crc kubenswrapper[4814]: I0122 06:05:34.051481 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dea91716-8929-46a6-a99a-50eef8fc9b34-utilities\") pod \"community-operators-czh2f\" (UID: \"dea91716-8929-46a6-a99a-50eef8fc9b34\") " pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:34 crc kubenswrapper[4814]: I0122 06:05:34.153341 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dss2c\" (UniqueName: \"kubernetes.io/projected/dea91716-8929-46a6-a99a-50eef8fc9b34-kube-api-access-dss2c\") pod \"community-operators-czh2f\" (UID: \"dea91716-8929-46a6-a99a-50eef8fc9b34\") " pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:34 crc kubenswrapper[4814]: I0122 06:05:34.153479 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dea91716-8929-46a6-a99a-50eef8fc9b34-utilities\") pod \"community-operators-czh2f\" (UID: \"dea91716-8929-46a6-a99a-50eef8fc9b34\") " pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:34 crc kubenswrapper[4814]: I0122 06:05:34.153583 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dea91716-8929-46a6-a99a-50eef8fc9b34-catalog-content\") pod \"community-operators-czh2f\" (UID: \"dea91716-8929-46a6-a99a-50eef8fc9b34\") " pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:34 crc kubenswrapper[4814]: I0122 06:05:34.154299 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dea91716-8929-46a6-a99a-50eef8fc9b34-catalog-content\") pod \"community-operators-czh2f\" (UID: \"dea91716-8929-46a6-a99a-50eef8fc9b34\") " pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:34 crc kubenswrapper[4814]: I0122 06:05:34.154594 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dea91716-8929-46a6-a99a-50eef8fc9b34-utilities\") pod \"community-operators-czh2f\" (UID: \"dea91716-8929-46a6-a99a-50eef8fc9b34\") " pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:34 crc kubenswrapper[4814]: I0122 06:05:34.178528 4814 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-dss2c\" (UniqueName: \"kubernetes.io/projected/dea91716-8929-46a6-a99a-50eef8fc9b34-kube-api-access-dss2c\") pod \"community-operators-czh2f\" (UID: \"dea91716-8929-46a6-a99a-50eef8fc9b34\") " pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:34 crc kubenswrapper[4814]: I0122 06:05:34.215673 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:35 crc kubenswrapper[4814]: I0122 06:05:35.256224 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-czh2f"] Jan 22 06:05:36 crc kubenswrapper[4814]: I0122 06:05:36.004592 4814 generic.go:334] "Generic (PLEG): container finished" podID="dea91716-8929-46a6-a99a-50eef8fc9b34" containerID="49f25711ae761adc15a1b3ac51d98c52b5eed46e70c381608f505f0153a842f2" exitCode=0 Jan 22 06:05:36 crc kubenswrapper[4814]: I0122 06:05:36.004775 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-czh2f" event={"ID":"dea91716-8929-46a6-a99a-50eef8fc9b34","Type":"ContainerDied","Data":"49f25711ae761adc15a1b3ac51d98c52b5eed46e70c381608f505f0153a842f2"} Jan 22 06:05:36 crc kubenswrapper[4814]: I0122 06:05:36.005321 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-czh2f" event={"ID":"dea91716-8929-46a6-a99a-50eef8fc9b34","Type":"ContainerStarted","Data":"f11cd8f950313066930dd8da7ab3cbc1e32ca54c208e1666c07e2030ee4a5d6e"} Jan 22 06:05:36 crc kubenswrapper[4814]: I0122 06:05:36.007977 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 06:05:39 crc kubenswrapper[4814]: I0122 06:05:39.032921 4814 generic.go:334] "Generic (PLEG): container finished" podID="dea91716-8929-46a6-a99a-50eef8fc9b34" containerID="ca736de10d96740bf78fb5ba4539fa6dd94c17143ab1cfcbbfb8a5b9a46252e1" exitCode=0 Jan 22 06:05:39 crc kubenswrapper[4814]: I0122 06:05:39.033503 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-czh2f" event={"ID":"dea91716-8929-46a6-a99a-50eef8fc9b34","Type":"ContainerDied","Data":"ca736de10d96740bf78fb5ba4539fa6dd94c17143ab1cfcbbfb8a5b9a46252e1"} Jan 22 06:05:41 crc kubenswrapper[4814]: I0122 06:05:41.052390 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-czh2f" event={"ID":"dea91716-8929-46a6-a99a-50eef8fc9b34","Type":"ContainerStarted","Data":"ff21036fd2749e3b54575883b9f4a01b890b02506da0d7b5711a8c54765c28d2"} Jan 22 06:05:41 crc kubenswrapper[4814]: I0122 06:05:41.081052 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-czh2f" podStartSLOduration=3.975022138 podStartE2EDuration="8.081033146s" podCreationTimestamp="2026-01-22 06:05:33 +0000 UTC" firstStartedPulling="2026-01-22 06:05:36.007585806 +0000 UTC m=+2822.091074031" lastFinishedPulling="2026-01-22 06:05:40.113596824 +0000 UTC m=+2826.197085039" observedRunningTime="2026-01-22 06:05:41.072930779 +0000 UTC m=+2827.156418994" watchObservedRunningTime="2026-01-22 06:05:41.081033146 +0000 UTC m=+2827.164521361" Jan 22 06:05:44 crc kubenswrapper[4814]: I0122 06:05:44.216237 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:44 crc kubenswrapper[4814]: I0122 06:05:44.216602 4814 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:44 crc kubenswrapper[4814]: I0122 06:05:44.336087 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:45 crc kubenswrapper[4814]: I0122 06:05:45.177119 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:45 crc kubenswrapper[4814]: I0122 06:05:45.240221 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-czh2f"] Jan 22 06:05:47 crc kubenswrapper[4814]: I0122 06:05:47.111653 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-czh2f" podUID="dea91716-8929-46a6-a99a-50eef8fc9b34" containerName="registry-server" containerID="cri-o://ff21036fd2749e3b54575883b9f4a01b890b02506da0d7b5711a8c54765c28d2" gracePeriod=2 Jan 22 06:05:47 crc kubenswrapper[4814]: I0122 06:05:47.585432 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:47 crc kubenswrapper[4814]: I0122 06:05:47.666341 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dea91716-8929-46a6-a99a-50eef8fc9b34-utilities\") pod \"dea91716-8929-46a6-a99a-50eef8fc9b34\" (UID: \"dea91716-8929-46a6-a99a-50eef8fc9b34\") " Jan 22 06:05:47 crc kubenswrapper[4814]: I0122 06:05:47.666407 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dss2c\" (UniqueName: \"kubernetes.io/projected/dea91716-8929-46a6-a99a-50eef8fc9b34-kube-api-access-dss2c\") pod \"dea91716-8929-46a6-a99a-50eef8fc9b34\" (UID: \"dea91716-8929-46a6-a99a-50eef8fc9b34\") " Jan 22 06:05:47 crc kubenswrapper[4814]: I0122 06:05:47.666489 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dea91716-8929-46a6-a99a-50eef8fc9b34-catalog-content\") pod \"dea91716-8929-46a6-a99a-50eef8fc9b34\" (UID: \"dea91716-8929-46a6-a99a-50eef8fc9b34\") " Jan 22 06:05:47 crc kubenswrapper[4814]: I0122 06:05:47.667314 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dea91716-8929-46a6-a99a-50eef8fc9b34-utilities" (OuterVolumeSpecName: "utilities") pod "dea91716-8929-46a6-a99a-50eef8fc9b34" (UID: "dea91716-8929-46a6-a99a-50eef8fc9b34"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:05:47 crc kubenswrapper[4814]: I0122 06:05:47.672651 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dea91716-8929-46a6-a99a-50eef8fc9b34-kube-api-access-dss2c" (OuterVolumeSpecName: "kube-api-access-dss2c") pod "dea91716-8929-46a6-a99a-50eef8fc9b34" (UID: "dea91716-8929-46a6-a99a-50eef8fc9b34"). InnerVolumeSpecName "kube-api-access-dss2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:05:47 crc kubenswrapper[4814]: I0122 06:05:47.731009 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dea91716-8929-46a6-a99a-50eef8fc9b34-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dea91716-8929-46a6-a99a-50eef8fc9b34" (UID: "dea91716-8929-46a6-a99a-50eef8fc9b34"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:05:47 crc kubenswrapper[4814]: I0122 06:05:47.768638 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dea91716-8929-46a6-a99a-50eef8fc9b34-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:47 crc kubenswrapper[4814]: I0122 06:05:47.768691 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dea91716-8929-46a6-a99a-50eef8fc9b34-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:47 crc kubenswrapper[4814]: I0122 06:05:47.768701 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dss2c\" (UniqueName: \"kubernetes.io/projected/dea91716-8929-46a6-a99a-50eef8fc9b34-kube-api-access-dss2c\") on node \"crc\" DevicePath \"\"" Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.119981 4814 generic.go:334] "Generic (PLEG): container finished" podID="dea91716-8929-46a6-a99a-50eef8fc9b34" containerID="ff21036fd2749e3b54575883b9f4a01b890b02506da0d7b5711a8c54765c28d2" exitCode=0 Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.120019 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-czh2f" event={"ID":"dea91716-8929-46a6-a99a-50eef8fc9b34","Type":"ContainerDied","Data":"ff21036fd2749e3b54575883b9f4a01b890b02506da0d7b5711a8c54765c28d2"} Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.120029 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-czh2f" Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.120048 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-czh2f" event={"ID":"dea91716-8929-46a6-a99a-50eef8fc9b34","Type":"ContainerDied","Data":"f11cd8f950313066930dd8da7ab3cbc1e32ca54c208e1666c07e2030ee4a5d6e"} Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.120064 4814 scope.go:117] "RemoveContainer" containerID="ff21036fd2749e3b54575883b9f4a01b890b02506da0d7b5711a8c54765c28d2" Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.139214 4814 scope.go:117] "RemoveContainer" containerID="ca736de10d96740bf78fb5ba4539fa6dd94c17143ab1cfcbbfb8a5b9a46252e1" Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.156210 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-czh2f"] Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.163824 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-czh2f"] Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.164766 4814 scope.go:117] "RemoveContainer" containerID="49f25711ae761adc15a1b3ac51d98c52b5eed46e70c381608f505f0153a842f2" Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.207446 4814 scope.go:117] "RemoveContainer" containerID="ff21036fd2749e3b54575883b9f4a01b890b02506da0d7b5711a8c54765c28d2" Jan 22 06:05:48 crc kubenswrapper[4814]: E0122 06:05:48.207928 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff21036fd2749e3b54575883b9f4a01b890b02506da0d7b5711a8c54765c28d2\": container with ID starting with ff21036fd2749e3b54575883b9f4a01b890b02506da0d7b5711a8c54765c28d2 not found: ID does not exist" containerID="ff21036fd2749e3b54575883b9f4a01b890b02506da0d7b5711a8c54765c28d2" Jan 22 06:05:48 crc 
kubenswrapper[4814]: I0122 06:05:48.207970 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff21036fd2749e3b54575883b9f4a01b890b02506da0d7b5711a8c54765c28d2"} err="failed to get container status \"ff21036fd2749e3b54575883b9f4a01b890b02506da0d7b5711a8c54765c28d2\": rpc error: code = NotFound desc = could not find container \"ff21036fd2749e3b54575883b9f4a01b890b02506da0d7b5711a8c54765c28d2\": container with ID starting with ff21036fd2749e3b54575883b9f4a01b890b02506da0d7b5711a8c54765c28d2 not found: ID does not exist" Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.208000 4814 scope.go:117] "RemoveContainer" containerID="ca736de10d96740bf78fb5ba4539fa6dd94c17143ab1cfcbbfb8a5b9a46252e1" Jan 22 06:05:48 crc kubenswrapper[4814]: E0122 06:05:48.208464 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca736de10d96740bf78fb5ba4539fa6dd94c17143ab1cfcbbfb8a5b9a46252e1\": container with ID starting with ca736de10d96740bf78fb5ba4539fa6dd94c17143ab1cfcbbfb8a5b9a46252e1 not found: ID does not exist" containerID="ca736de10d96740bf78fb5ba4539fa6dd94c17143ab1cfcbbfb8a5b9a46252e1" Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.208501 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca736de10d96740bf78fb5ba4539fa6dd94c17143ab1cfcbbfb8a5b9a46252e1"} err="failed to get container status \"ca736de10d96740bf78fb5ba4539fa6dd94c17143ab1cfcbbfb8a5b9a46252e1\": rpc error: code = NotFound desc = could not find container \"ca736de10d96740bf78fb5ba4539fa6dd94c17143ab1cfcbbfb8a5b9a46252e1\": container with ID starting with ca736de10d96740bf78fb5ba4539fa6dd94c17143ab1cfcbbfb8a5b9a46252e1 not found: ID does not exist" Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.208520 4814 scope.go:117] "RemoveContainer" containerID="49f25711ae761adc15a1b3ac51d98c52b5eed46e70c381608f505f0153a842f2" Jan 22 06:05:48 crc kubenswrapper[4814]: E0122 06:05:48.208826 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49f25711ae761adc15a1b3ac51d98c52b5eed46e70c381608f505f0153a842f2\": container with ID starting with 49f25711ae761adc15a1b3ac51d98c52b5eed46e70c381608f505f0153a842f2 not found: ID does not exist" containerID="49f25711ae761adc15a1b3ac51d98c52b5eed46e70c381608f505f0153a842f2" Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.208844 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49f25711ae761adc15a1b3ac51d98c52b5eed46e70c381608f505f0153a842f2"} err="failed to get container status \"49f25711ae761adc15a1b3ac51d98c52b5eed46e70c381608f505f0153a842f2\": rpc error: code = NotFound desc = could not find container \"49f25711ae761adc15a1b3ac51d98c52b5eed46e70c381608f505f0153a842f2\": container with ID starting with 49f25711ae761adc15a1b3ac51d98c52b5eed46e70c381608f505f0153a842f2 not found: ID does not exist" Jan 22 06:05:48 crc kubenswrapper[4814]: I0122 06:05:48.354682 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dea91716-8929-46a6-a99a-50eef8fc9b34" path="/var/lib/kubelet/pods/dea91716-8929-46a6-a99a-50eef8fc9b34/volumes" Jan 22 06:06:00 crc kubenswrapper[4814]: I0122 06:06:00.266544 4814 generic.go:334] "Generic (PLEG): container finished" podID="14bf03da-292e-4b41-99b2-2410b5f006f4" containerID="d5da2d4fe066d331bd4fb5f22511fdf85c62e70c098afc3f586f2d020a629503" exitCode=0 
Jan 22 06:06:00 crc kubenswrapper[4814]: I0122 06:06:00.266654 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" event={"ID":"14bf03da-292e-4b41-99b2-2410b5f006f4","Type":"ContainerDied","Data":"d5da2d4fe066d331bd4fb5f22511fdf85c62e70c098afc3f586f2d020a629503"}
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.748599 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg"
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.858222 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-inventory\") pod \"14bf03da-292e-4b41-99b2-2410b5f006f4\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") "
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.858298 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ssh-key-openstack-edpm-ipam\") pod \"14bf03da-292e-4b41-99b2-2410b5f006f4\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") "
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.858340 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-telemetry-combined-ca-bundle\") pod \"14bf03da-292e-4b41-99b2-2410b5f006f4\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") "
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.858389 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-1\") pod \"14bf03da-292e-4b41-99b2-2410b5f006f4\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") "
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.858445 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96k8m\" (UniqueName: \"kubernetes.io/projected/14bf03da-292e-4b41-99b2-2410b5f006f4-kube-api-access-96k8m\") pod \"14bf03da-292e-4b41-99b2-2410b5f006f4\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") "
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.858483 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-2\") pod \"14bf03da-292e-4b41-99b2-2410b5f006f4\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") "
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.858548 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-0\") pod \"14bf03da-292e-4b41-99b2-2410b5f006f4\" (UID: \"14bf03da-292e-4b41-99b2-2410b5f006f4\") "
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.865978 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14bf03da-292e-4b41-99b2-2410b5f006f4-kube-api-access-96k8m" (OuterVolumeSpecName: "kube-api-access-96k8m") pod "14bf03da-292e-4b41-99b2-2410b5f006f4" (UID: "14bf03da-292e-4b41-99b2-2410b5f006f4"). InnerVolumeSpecName "kube-api-access-96k8m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.870981 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "14bf03da-292e-4b41-99b2-2410b5f006f4" (UID: "14bf03da-292e-4b41-99b2-2410b5f006f4"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.891269 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "14bf03da-292e-4b41-99b2-2410b5f006f4" (UID: "14bf03da-292e-4b41-99b2-2410b5f006f4"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.891332 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "14bf03da-292e-4b41-99b2-2410b5f006f4" (UID: "14bf03da-292e-4b41-99b2-2410b5f006f4"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.899402 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "14bf03da-292e-4b41-99b2-2410b5f006f4" (UID: "14bf03da-292e-4b41-99b2-2410b5f006f4"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.904044 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "14bf03da-292e-4b41-99b2-2410b5f006f4" (UID: "14bf03da-292e-4b41-99b2-2410b5f006f4"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.920946 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-inventory" (OuterVolumeSpecName: "inventory") pod "14bf03da-292e-4b41-99b2-2410b5f006f4" (UID: "14bf03da-292e-4b41-99b2-2410b5f006f4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.962310 4814 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.962354 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96k8m\" (UniqueName: \"kubernetes.io/projected/14bf03da-292e-4b41-99b2-2410b5f006f4-kube-api-access-96k8m\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.962366 4814 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.962379 4814 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.962390 4814 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-inventory\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.962402 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:01 crc kubenswrapper[4814]: I0122 06:06:01.962414 4814 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14bf03da-292e-4b41-99b2-2410b5f006f4-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:02 crc kubenswrapper[4814]: I0122 06:06:02.288800 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg" event={"ID":"14bf03da-292e-4b41-99b2-2410b5f006f4","Type":"ContainerDied","Data":"ca1506155849ccd93e944a3b46b90823c8fc41be5394f58d1e330c2139b9ffaa"}
Jan 22 06:06:02 crc kubenswrapper[4814]: I0122 06:06:02.288850 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca1506155849ccd93e944a3b46b90823c8fc41be5394f58d1e330c2139b9ffaa"
Jan 22 06:06:02 crc kubenswrapper[4814]: I0122 06:06:02.288910 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-47bxg"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.265527 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cwqz9"]
Jan 22 06:06:09 crc kubenswrapper[4814]: E0122 06:06:09.267573 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14bf03da-292e-4b41-99b2-2410b5f006f4" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.267597 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="14bf03da-292e-4b41-99b2-2410b5f006f4" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Jan 22 06:06:09 crc kubenswrapper[4814]: E0122 06:06:09.267640 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dea91716-8929-46a6-a99a-50eef8fc9b34" containerName="extract-utilities"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.267653 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="dea91716-8929-46a6-a99a-50eef8fc9b34" containerName="extract-utilities"
Jan 22 06:06:09 crc kubenswrapper[4814]: E0122 06:06:09.267672 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dea91716-8929-46a6-a99a-50eef8fc9b34" containerName="registry-server"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.267680 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="dea91716-8929-46a6-a99a-50eef8fc9b34" containerName="registry-server"
Jan 22 06:06:09 crc kubenswrapper[4814]: E0122 06:06:09.267706 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dea91716-8929-46a6-a99a-50eef8fc9b34" containerName="extract-content"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.267714 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="dea91716-8929-46a6-a99a-50eef8fc9b34" containerName="extract-content"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.267954 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="14bf03da-292e-4b41-99b2-2410b5f006f4" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.267979 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="dea91716-8929-46a6-a99a-50eef8fc9b34" containerName="registry-server"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.269547 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.289296 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cwqz9"]
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.444089 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2020bf10-af35-402c-a63e-a24d44641adf-utilities\") pod \"certified-operators-cwqz9\" (UID: \"2020bf10-af35-402c-a63e-a24d44641adf\") " pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.444221 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btbfj\" (UniqueName: \"kubernetes.io/projected/2020bf10-af35-402c-a63e-a24d44641adf-kube-api-access-btbfj\") pod \"certified-operators-cwqz9\" (UID: \"2020bf10-af35-402c-a63e-a24d44641adf\") " pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.444326 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2020bf10-af35-402c-a63e-a24d44641adf-catalog-content\") pod \"certified-operators-cwqz9\" (UID: \"2020bf10-af35-402c-a63e-a24d44641adf\") " pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.545601 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2020bf10-af35-402c-a63e-a24d44641adf-catalog-content\") pod \"certified-operators-cwqz9\" (UID: \"2020bf10-af35-402c-a63e-a24d44641adf\") " pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.545763 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2020bf10-af35-402c-a63e-a24d44641adf-utilities\") pod \"certified-operators-cwqz9\" (UID: \"2020bf10-af35-402c-a63e-a24d44641adf\") " pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.545848 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btbfj\" (UniqueName: \"kubernetes.io/projected/2020bf10-af35-402c-a63e-a24d44641adf-kube-api-access-btbfj\") pod \"certified-operators-cwqz9\" (UID: \"2020bf10-af35-402c-a63e-a24d44641adf\") " pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.546804 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2020bf10-af35-402c-a63e-a24d44641adf-catalog-content\") pod \"certified-operators-cwqz9\" (UID: \"2020bf10-af35-402c-a63e-a24d44641adf\") " pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.547961 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2020bf10-af35-402c-a63e-a24d44641adf-utilities\") pod \"certified-operators-cwqz9\" (UID: \"2020bf10-af35-402c-a63e-a24d44641adf\") " pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.566568 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btbfj\" (UniqueName: \"kubernetes.io/projected/2020bf10-af35-402c-a63e-a24d44641adf-kube-api-access-btbfj\") pod \"certified-operators-cwqz9\" (UID: \"2020bf10-af35-402c-a63e-a24d44641adf\") " pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:09 crc kubenswrapper[4814]: I0122 06:06:09.599400 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:10 crc kubenswrapper[4814]: I0122 06:06:10.172863 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cwqz9"]
Jan 22 06:06:10 crc kubenswrapper[4814]: W0122 06:06:10.190786 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2020bf10_af35_402c_a63e_a24d44641adf.slice/crio-f750bb31a7c042ad1cd174e39af0abf540a22d7df89fcf8d2cd02cfaf00a9967 WatchSource:0}: Error finding container f750bb31a7c042ad1cd174e39af0abf540a22d7df89fcf8d2cd02cfaf00a9967: Status 404 returned error can't find the container with id f750bb31a7c042ad1cd174e39af0abf540a22d7df89fcf8d2cd02cfaf00a9967
Jan 22 06:06:10 crc kubenswrapper[4814]: I0122 06:06:10.414154 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwqz9" event={"ID":"2020bf10-af35-402c-a63e-a24d44641adf","Type":"ContainerStarted","Data":"f750bb31a7c042ad1cd174e39af0abf540a22d7df89fcf8d2cd02cfaf00a9967"}
Jan 22 06:06:11 crc kubenswrapper[4814]: I0122 06:06:11.426373 4814 generic.go:334] "Generic (PLEG): container finished" podID="2020bf10-af35-402c-a63e-a24d44641adf" containerID="1969f3ab662b371fbec149bcd34273fe493fd6935bf3c13844ea7fe8bf812acf" exitCode=0
Jan 22 06:06:11 crc kubenswrapper[4814]: I0122 06:06:11.426499 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwqz9" event={"ID":"2020bf10-af35-402c-a63e-a24d44641adf","Type":"ContainerDied","Data":"1969f3ab662b371fbec149bcd34273fe493fd6935bf3c13844ea7fe8bf812acf"}
Jan 22 06:06:13 crc kubenswrapper[4814]: I0122 06:06:13.448181 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwqz9" event={"ID":"2020bf10-af35-402c-a63e-a24d44641adf","Type":"ContainerStarted","Data":"8fd6f59b108923892c0b0203bacd17837a779a7e17f337bcdc60635822a89da7"}
Jan 22 06:06:15 crc kubenswrapper[4814]: I0122 06:06:15.470838 4814 generic.go:334] "Generic (PLEG): container finished" podID="2020bf10-af35-402c-a63e-a24d44641adf" containerID="8fd6f59b108923892c0b0203bacd17837a779a7e17f337bcdc60635822a89da7" exitCode=0
Jan 22 06:06:15 crc kubenswrapper[4814]: I0122 06:06:15.470910 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwqz9" event={"ID":"2020bf10-af35-402c-a63e-a24d44641adf","Type":"ContainerDied","Data":"8fd6f59b108923892c0b0203bacd17837a779a7e17f337bcdc60635822a89da7"}
Jan 22 06:06:16 crc kubenswrapper[4814]: I0122 06:06:16.482128 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwqz9" event={"ID":"2020bf10-af35-402c-a63e-a24d44641adf","Type":"ContainerStarted","Data":"d75b790943d4b01988169aed732b8c23650ac1b2ac4a783fd43c6e794b32d6c9"}
Jan 22 06:06:16 crc kubenswrapper[4814]: I0122 06:06:16.507029 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cwqz9" podStartSLOduration=2.8538862910000002 podStartE2EDuration="7.507004268s" podCreationTimestamp="2026-01-22 06:06:09 +0000 UTC" firstStartedPulling="2026-01-22 06:06:11.429257103 +0000 UTC m=+2857.512745338" lastFinishedPulling="2026-01-22 06:06:16.08237507 +0000 UTC m=+2862.165863315" observedRunningTime="2026-01-22 06:06:16.50612877 +0000 UTC m=+2862.589617005" watchObservedRunningTime="2026-01-22 06:06:16.507004268 +0000 UTC m=+2862.590492483"
Jan 22 06:06:19 crc kubenswrapper[4814]: I0122 06:06:19.600461 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:19 crc kubenswrapper[4814]: I0122 06:06:19.600936 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:19 crc kubenswrapper[4814]: I0122 06:06:19.684323 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:21 crc kubenswrapper[4814]: I0122 06:06:21.601482 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:21 crc kubenswrapper[4814]: I0122 06:06:21.666967 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cwqz9"]
Jan 22 06:06:23 crc kubenswrapper[4814]: I0122 06:06:23.543961 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cwqz9" podUID="2020bf10-af35-402c-a63e-a24d44641adf" containerName="registry-server" containerID="cri-o://d75b790943d4b01988169aed732b8c23650ac1b2ac4a783fd43c6e794b32d6c9" gracePeriod=2
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.545145 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.555699 4814 generic.go:334] "Generic (PLEG): container finished" podID="2020bf10-af35-402c-a63e-a24d44641adf" containerID="d75b790943d4b01988169aed732b8c23650ac1b2ac4a783fd43c6e794b32d6c9" exitCode=0
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.555760 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwqz9" event={"ID":"2020bf10-af35-402c-a63e-a24d44641adf","Type":"ContainerDied","Data":"d75b790943d4b01988169aed732b8c23650ac1b2ac4a783fd43c6e794b32d6c9"}
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.555766 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cwqz9"
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.555788 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cwqz9" event={"ID":"2020bf10-af35-402c-a63e-a24d44641adf","Type":"ContainerDied","Data":"f750bb31a7c042ad1cd174e39af0abf540a22d7df89fcf8d2cd02cfaf00a9967"}
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.555806 4814 scope.go:117] "RemoveContainer" containerID="d75b790943d4b01988169aed732b8c23650ac1b2ac4a783fd43c6e794b32d6c9"
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.613773 4814 scope.go:117] "RemoveContainer" containerID="8fd6f59b108923892c0b0203bacd17837a779a7e17f337bcdc60635822a89da7"
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.647924 4814 scope.go:117] "RemoveContainer" containerID="1969f3ab662b371fbec149bcd34273fe493fd6935bf3c13844ea7fe8bf812acf"
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.660495 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2020bf10-af35-402c-a63e-a24d44641adf-utilities\") pod \"2020bf10-af35-402c-a63e-a24d44641adf\" (UID: \"2020bf10-af35-402c-a63e-a24d44641adf\") "
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.660857 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-btbfj\" (UniqueName: \"kubernetes.io/projected/2020bf10-af35-402c-a63e-a24d44641adf-kube-api-access-btbfj\") pod \"2020bf10-af35-402c-a63e-a24d44641adf\" (UID: \"2020bf10-af35-402c-a63e-a24d44641adf\") "
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.660922 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2020bf10-af35-402c-a63e-a24d44641adf-catalog-content\") pod \"2020bf10-af35-402c-a63e-a24d44641adf\" (UID: \"2020bf10-af35-402c-a63e-a24d44641adf\") "
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.662701 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2020bf10-af35-402c-a63e-a24d44641adf-utilities" (OuterVolumeSpecName: "utilities") pod "2020bf10-af35-402c-a63e-a24d44641adf" (UID: "2020bf10-af35-402c-a63e-a24d44641adf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.668972 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2020bf10-af35-402c-a63e-a24d44641adf-kube-api-access-btbfj" (OuterVolumeSpecName: "kube-api-access-btbfj") pod "2020bf10-af35-402c-a63e-a24d44641adf" (UID: "2020bf10-af35-402c-a63e-a24d44641adf"). InnerVolumeSpecName "kube-api-access-btbfj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.710567 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2020bf10-af35-402c-a63e-a24d44641adf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2020bf10-af35-402c-a63e-a24d44641adf" (UID: "2020bf10-af35-402c-a63e-a24d44641adf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.731345 4814 scope.go:117] "RemoveContainer" containerID="d75b790943d4b01988169aed732b8c23650ac1b2ac4a783fd43c6e794b32d6c9"
Jan 22 06:06:24 crc kubenswrapper[4814]: E0122 06:06:24.732058 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d75b790943d4b01988169aed732b8c23650ac1b2ac4a783fd43c6e794b32d6c9\": container with ID starting with d75b790943d4b01988169aed732b8c23650ac1b2ac4a783fd43c6e794b32d6c9 not found: ID does not exist" containerID="d75b790943d4b01988169aed732b8c23650ac1b2ac4a783fd43c6e794b32d6c9"
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.732085 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d75b790943d4b01988169aed732b8c23650ac1b2ac4a783fd43c6e794b32d6c9"} err="failed to get container status \"d75b790943d4b01988169aed732b8c23650ac1b2ac4a783fd43c6e794b32d6c9\": rpc error: code = NotFound desc = could not find container \"d75b790943d4b01988169aed732b8c23650ac1b2ac4a783fd43c6e794b32d6c9\": container with ID starting with d75b790943d4b01988169aed732b8c23650ac1b2ac4a783fd43c6e794b32d6c9 not found: ID does not exist"
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.732104 4814 scope.go:117] "RemoveContainer" containerID="8fd6f59b108923892c0b0203bacd17837a779a7e17f337bcdc60635822a89da7"
Jan 22 06:06:24 crc kubenswrapper[4814]: E0122 06:06:24.732670 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fd6f59b108923892c0b0203bacd17837a779a7e17f337bcdc60635822a89da7\": container with ID starting with 8fd6f59b108923892c0b0203bacd17837a779a7e17f337bcdc60635822a89da7 not found: ID does not exist" containerID="8fd6f59b108923892c0b0203bacd17837a779a7e17f337bcdc60635822a89da7"
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.732730 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fd6f59b108923892c0b0203bacd17837a779a7e17f337bcdc60635822a89da7"} err="failed to get container status \"8fd6f59b108923892c0b0203bacd17837a779a7e17f337bcdc60635822a89da7\": rpc error: code = NotFound desc = could not find container \"8fd6f59b108923892c0b0203bacd17837a779a7e17f337bcdc60635822a89da7\": container with ID starting with 8fd6f59b108923892c0b0203bacd17837a779a7e17f337bcdc60635822a89da7 not found: ID does not exist"
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.732770 4814 scope.go:117] "RemoveContainer" containerID="1969f3ab662b371fbec149bcd34273fe493fd6935bf3c13844ea7fe8bf812acf"
Jan 22 06:06:24 crc kubenswrapper[4814]: E0122 06:06:24.733122 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1969f3ab662b371fbec149bcd34273fe493fd6935bf3c13844ea7fe8bf812acf\": container with ID starting with 1969f3ab662b371fbec149bcd34273fe493fd6935bf3c13844ea7fe8bf812acf not found: ID does not exist" containerID="1969f3ab662b371fbec149bcd34273fe493fd6935bf3c13844ea7fe8bf812acf"
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.733225 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1969f3ab662b371fbec149bcd34273fe493fd6935bf3c13844ea7fe8bf812acf"} err="failed to get container status \"1969f3ab662b371fbec149bcd34273fe493fd6935bf3c13844ea7fe8bf812acf\": rpc error: code = NotFound desc = could not find container \"1969f3ab662b371fbec149bcd34273fe493fd6935bf3c13844ea7fe8bf812acf\": container with ID starting with 1969f3ab662b371fbec149bcd34273fe493fd6935bf3c13844ea7fe8bf812acf not found: ID does not exist"
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.763638 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-btbfj\" (UniqueName: \"kubernetes.io/projected/2020bf10-af35-402c-a63e-a24d44641adf-kube-api-access-btbfj\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.763672 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2020bf10-af35-402c-a63e-a24d44641adf-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.763680 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2020bf10-af35-402c-a63e-a24d44641adf-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.908399 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cwqz9"]
Jan 22 06:06:24 crc kubenswrapper[4814]: I0122 06:06:24.923108 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cwqz9"]
Jan 22 06:06:26 crc kubenswrapper[4814]: I0122 06:06:26.359768 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2020bf10-af35-402c-a63e-a24d44641adf" path="/var/lib/kubelet/pods/2020bf10-af35-402c-a63e-a24d44641adf/volumes"
Jan 22 06:06:49 crc kubenswrapper[4814]: I0122 06:06:49.614795 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:06:49 crc kubenswrapper[4814]: I0122 06:06:49.615588 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.673431 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest-s00-multi-thread-testing"]
Jan 22 06:07:04 crc kubenswrapper[4814]: E0122 06:07:04.674958 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2020bf10-af35-402c-a63e-a24d44641adf" containerName="extract-content"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.674992 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2020bf10-af35-402c-a63e-a24d44641adf" containerName="extract-content"
Jan 22 06:07:04 crc kubenswrapper[4814]: E0122 06:07:04.675024 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2020bf10-af35-402c-a63e-a24d44641adf" containerName="extract-utilities"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.675038 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2020bf10-af35-402c-a63e-a24d44641adf" containerName="extract-utilities"
Jan 22 06:07:04 crc kubenswrapper[4814]: E0122 06:07:04.675062 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2020bf10-af35-402c-a63e-a24d44641adf" containerName="registry-server"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.675074 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2020bf10-af35-402c-a63e-a24d44641adf" containerName="registry-server"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.675417 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="2020bf10-af35-402c-a63e-a24d44641adf" containerName="registry-server"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.676450 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.684724 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.684776 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-tjss5"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.685092 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.685283 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.695537 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s00-multi-thread-testing"]
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.773900 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-config-data\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.774084 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.774154 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.774289 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-openstack-config-secret\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.774470 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.774511 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-ca-certs\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.774556 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-ssh-key\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.774601 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-openstack-config\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.774807 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gghk8\" (UniqueName: \"kubernetes.io/projected/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-kube-api-access-gghk8\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.876703 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.877029 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.877063 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-openstack-config-secret\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing"
Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.877118 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName:
\"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.877143 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-ca-certs\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.877179 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-ssh-key\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.877221 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-openstack-config\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.877300 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gghk8\" (UniqueName: \"kubernetes.io/projected/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-kube-api-access-gghk8\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.877355 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-config-data\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.877750 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.879288 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.879380 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-config-data\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: 
\"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.879736 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-openstack-config\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.880236 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.889757 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-ssh-key\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.890972 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-openstack-config-secret\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.891596 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-ca-certs\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.923708 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gghk8\" (UniqueName: \"kubernetes.io/projected/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-kube-api-access-gghk8\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:04 crc kubenswrapper[4814]: I0122 06:07:04.931769 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest-s00-multi-thread-testing\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:05 crc kubenswrapper[4814]: I0122 06:07:05.014144 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:07:05 crc kubenswrapper[4814]: I0122 06:07:05.692189 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s00-multi-thread-testing"] Jan 22 06:07:06 crc kubenswrapper[4814]: I0122 06:07:06.079811 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" event={"ID":"0aa6d92e-884d-41d0-a26e-3e5de31c05a3","Type":"ContainerStarted","Data":"33912a11c5e30aaf2d689fd70de9a11a4507145c1935b81dc56a21a43dc73cd0"} Jan 22 06:07:19 crc kubenswrapper[4814]: I0122 06:07:19.613937 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:07:19 crc kubenswrapper[4814]: I0122 06:07:19.615548 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:07:49 crc kubenswrapper[4814]: I0122 06:07:49.614058 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:07:49 crc kubenswrapper[4814]: I0122 06:07:49.614557 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:07:49 crc kubenswrapper[4814]: I0122 06:07:49.614606 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 06:07:49 crc kubenswrapper[4814]: I0122 06:07:49.615301 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:07:49 crc kubenswrapper[4814]: I0122 06:07:49.615361 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" gracePeriod=600 Jan 22 06:07:51 crc kubenswrapper[4814]: I0122 06:07:51.610931 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" exitCode=0 Jan 22 06:07:51 crc kubenswrapper[4814]: I0122 06:07:51.610996 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101"} Jan 22 06:07:51 crc kubenswrapper[4814]: I0122 06:07:51.611271 4814 scope.go:117] "RemoveContainer" containerID="88cd0ee7919420de852346364f8a39180c165737b308f2deb64717e85eeb9db9" Jan 22 06:08:02 crc kubenswrapper[4814]: E0122 06:08:02.795073 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:08:02 crc kubenswrapper[4814]: E0122 06:08:02.918305 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:c3923531bcda0b0811b2d5053f189beb" Jan 22 06:08:02 crc kubenswrapper[4814]: E0122 06:08:02.918364 4814 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:c3923531bcda0b0811b2d5053f189beb" Jan 22 06:08:02 crc kubenswrapper[4814]: E0122 06:08:02.919829 4814 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:c3923531bcda0b0811b2d5053f189beb,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gghk8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,Mount
Propagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest-s00-multi-thread-testing_openstack(0aa6d92e-884d-41d0-a26e-3e5de31c05a3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 22 06:08:02 crc kubenswrapper[4814]: E0122 06:08:02.921040 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" podUID="0aa6d92e-884d-41d0-a26e-3e5de31c05a3" Jan 22 06:08:03 crc kubenswrapper[4814]: I0122 06:08:03.755658 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:08:03 crc kubenswrapper[4814]: E0122 06:08:03.756855 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:08:03 crc kubenswrapper[4814]: E0122 06:08:03.757495 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-antelope-centos9/openstack-tempest-all:c3923531bcda0b0811b2d5053f189beb\\\"\"" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" podUID="0aa6d92e-884d-41d0-a26e-3e5de31c05a3" Jan 22 06:08:15 crc kubenswrapper[4814]: I0122 06:08:15.344360 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:08:15 crc kubenswrapper[4814]: E0122 06:08:15.345621 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:08:17 crc kubenswrapper[4814]: I0122 
06:08:17.590680 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Jan 22 06:08:18 crc kubenswrapper[4814]: I0122 06:08:18.936286 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" event={"ID":"0aa6d92e-884d-41d0-a26e-3e5de31c05a3","Type":"ContainerStarted","Data":"445f73cae808bbd750a0b4628dcd89d656795206f547f3c8f0863b59ae637e69"} Jan 22 06:08:18 crc kubenswrapper[4814]: I0122 06:08:18.956596 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" podStartSLOduration=4.063935247 podStartE2EDuration="1m15.956572675s" podCreationTimestamp="2026-01-22 06:07:03 +0000 UTC" firstStartedPulling="2026-01-22 06:07:05.693556474 +0000 UTC m=+2911.777044689" lastFinishedPulling="2026-01-22 06:08:17.586193892 +0000 UTC m=+2983.669682117" observedRunningTime="2026-01-22 06:08:18.953359184 +0000 UTC m=+2985.036847439" watchObservedRunningTime="2026-01-22 06:08:18.956572675 +0000 UTC m=+2985.040060930" Jan 22 06:08:26 crc kubenswrapper[4814]: I0122 06:08:26.344441 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:08:26 crc kubenswrapper[4814]: E0122 06:08:26.345486 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:08:38 crc kubenswrapper[4814]: I0122 06:08:38.345062 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:08:38 crc kubenswrapper[4814]: E0122 06:08:38.346378 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:08:49 crc kubenswrapper[4814]: I0122 06:08:49.346727 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:08:49 crc kubenswrapper[4814]: E0122 06:08:49.347571 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:09:04 crc kubenswrapper[4814]: I0122 06:09:04.348685 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:09:04 crc kubenswrapper[4814]: E0122 06:09:04.349358 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:09:17 crc kubenswrapper[4814]: I0122 06:09:17.343771 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:09:17 crc kubenswrapper[4814]: E0122 06:09:17.344416 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:09:31 crc kubenswrapper[4814]: I0122 06:09:31.343910 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:09:31 crc kubenswrapper[4814]: E0122 06:09:31.344666 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:09:42 crc kubenswrapper[4814]: I0122 06:09:42.344486 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:09:42 crc kubenswrapper[4814]: E0122 06:09:42.345296 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:09:53 crc kubenswrapper[4814]: I0122 06:09:53.344071 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:09:53 crc kubenswrapper[4814]: E0122 06:09:53.344823 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:10:06 crc kubenswrapper[4814]: I0122 06:10:06.356573 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:10:06 crc kubenswrapper[4814]: E0122 06:10:06.357297 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:10:18 crc kubenswrapper[4814]: I0122 06:10:18.344880 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:10:18 crc kubenswrapper[4814]: E0122 06:10:18.345859 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:10:27 crc kubenswrapper[4814]: I0122 06:10:27.268865 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-cs4l5"] Jan 22 06:10:27 crc kubenswrapper[4814]: I0122 06:10:27.273810 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:27 crc kubenswrapper[4814]: I0122 06:10:27.365281 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-catalog-content\") pod \"redhat-marketplace-cs4l5\" (UID: \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\") " pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:27 crc kubenswrapper[4814]: I0122 06:10:27.366235 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-utilities\") pod \"redhat-marketplace-cs4l5\" (UID: \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\") " pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:27 crc kubenswrapper[4814]: I0122 06:10:27.366409 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzhnz\" (UniqueName: \"kubernetes.io/projected/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-kube-api-access-mzhnz\") pod \"redhat-marketplace-cs4l5\" (UID: \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\") " pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:27 crc kubenswrapper[4814]: I0122 06:10:27.366961 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cs4l5"] Jan 22 06:10:27 crc kubenswrapper[4814]: I0122 06:10:27.468597 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-catalog-content\") pod \"redhat-marketplace-cs4l5\" (UID: \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\") " pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:27 crc kubenswrapper[4814]: I0122 06:10:27.469015 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-utilities\") pod \"redhat-marketplace-cs4l5\" (UID: \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\") " pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:27 crc kubenswrapper[4814]: I0122 06:10:27.469062 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzhnz\" (UniqueName: 
\"kubernetes.io/projected/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-kube-api-access-mzhnz\") pod \"redhat-marketplace-cs4l5\" (UID: \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\") " pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:27 crc kubenswrapper[4814]: I0122 06:10:27.469114 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-catalog-content\") pod \"redhat-marketplace-cs4l5\" (UID: \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\") " pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:27 crc kubenswrapper[4814]: I0122 06:10:27.469468 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-utilities\") pod \"redhat-marketplace-cs4l5\" (UID: \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\") " pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:27 crc kubenswrapper[4814]: I0122 06:10:27.490249 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzhnz\" (UniqueName: \"kubernetes.io/projected/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-kube-api-access-mzhnz\") pod \"redhat-marketplace-cs4l5\" (UID: \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\") " pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:27 crc kubenswrapper[4814]: I0122 06:10:27.599034 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:28 crc kubenswrapper[4814]: I0122 06:10:28.272195 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cs4l5"] Jan 22 06:10:29 crc kubenswrapper[4814]: I0122 06:10:29.147867 4814 generic.go:334] "Generic (PLEG): container finished" podID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerID="1d9fa9523810a79d708d6c97ec61cb7469d37593a4b7c76525d513c8b64bce0c" exitCode=0 Jan 22 06:10:29 crc kubenswrapper[4814]: I0122 06:10:29.147974 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cs4l5" event={"ID":"c7c71a2b-ff71-49b6-b5a1-88851e29aff7","Type":"ContainerDied","Data":"1d9fa9523810a79d708d6c97ec61cb7469d37593a4b7c76525d513c8b64bce0c"} Jan 22 06:10:29 crc kubenswrapper[4814]: I0122 06:10:29.148481 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cs4l5" event={"ID":"c7c71a2b-ff71-49b6-b5a1-88851e29aff7","Type":"ContainerStarted","Data":"9ab21e37e3ee8c7f2987902ceb8ecb4a112c7c5183056f1bd1c1be845fda01bf"} Jan 22 06:10:30 crc kubenswrapper[4814]: I0122 06:10:30.163987 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cs4l5" event={"ID":"c7c71a2b-ff71-49b6-b5a1-88851e29aff7","Type":"ContainerStarted","Data":"ca31ea234558b5d6f9b2725071d156d26de85eb97de76462db3cc16de1806edd"} Jan 22 06:10:30 crc kubenswrapper[4814]: I0122 06:10:30.343715 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:10:30 crc kubenswrapper[4814]: E0122 06:10:30.343966 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:10:31 crc kubenswrapper[4814]: I0122 06:10:31.174965 4814 generic.go:334] "Generic (PLEG): container finished" podID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerID="ca31ea234558b5d6f9b2725071d156d26de85eb97de76462db3cc16de1806edd" exitCode=0 Jan 22 06:10:31 crc kubenswrapper[4814]: I0122 06:10:31.175016 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cs4l5" event={"ID":"c7c71a2b-ff71-49b6-b5a1-88851e29aff7","Type":"ContainerDied","Data":"ca31ea234558b5d6f9b2725071d156d26de85eb97de76462db3cc16de1806edd"} Jan 22 06:10:32 crc kubenswrapper[4814]: I0122 06:10:32.184084 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cs4l5" event={"ID":"c7c71a2b-ff71-49b6-b5a1-88851e29aff7","Type":"ContainerStarted","Data":"78a9b7a4789596f7bebdadfe251fefc9b0bf0ec97e38f7b0c1baec4e94f42021"} Jan 22 06:10:32 crc kubenswrapper[4814]: I0122 06:10:32.208041 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-cs4l5" podStartSLOduration=2.703841284 podStartE2EDuration="5.208023209s" podCreationTimestamp="2026-01-22 06:10:27 +0000 UTC" firstStartedPulling="2026-01-22 06:10:29.149411631 +0000 UTC m=+3115.232899846" lastFinishedPulling="2026-01-22 06:10:31.653593556 +0000 UTC m=+3117.737081771" observedRunningTime="2026-01-22 06:10:32.200271437 +0000 UTC m=+3118.283759642" watchObservedRunningTime="2026-01-22 06:10:32.208023209 +0000 UTC m=+3118.291511424" Jan 22 06:10:37 crc kubenswrapper[4814]: I0122 06:10:37.613998 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:37 crc kubenswrapper[4814]: I0122 06:10:37.614447 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:37 crc kubenswrapper[4814]: I0122 06:10:37.667443 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:38 crc kubenswrapper[4814]: I0122 06:10:38.324102 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:38 crc kubenswrapper[4814]: I0122 06:10:38.387856 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cs4l5"] Jan 22 06:10:40 crc kubenswrapper[4814]: I0122 06:10:40.277496 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-cs4l5" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="registry-server" containerID="cri-o://78a9b7a4789596f7bebdadfe251fefc9b0bf0ec97e38f7b0c1baec4e94f42021" gracePeriod=2 Jan 22 06:10:40 crc kubenswrapper[4814]: I0122 06:10:40.845133 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:40 crc kubenswrapper[4814]: I0122 06:10:40.926444 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-catalog-content\") pod \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\" (UID: \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\") " Jan 22 06:10:40 crc kubenswrapper[4814]: I0122 06:10:40.926532 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-utilities\") pod \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\" (UID: \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\") " Jan 22 06:10:40 crc kubenswrapper[4814]: I0122 06:10:40.926710 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzhnz\" (UniqueName: \"kubernetes.io/projected/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-kube-api-access-mzhnz\") pod \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\" (UID: \"c7c71a2b-ff71-49b6-b5a1-88851e29aff7\") " Jan 22 06:10:40 crc kubenswrapper[4814]: I0122 06:10:40.928892 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-utilities" (OuterVolumeSpecName: "utilities") pod "c7c71a2b-ff71-49b6-b5a1-88851e29aff7" (UID: "c7c71a2b-ff71-49b6-b5a1-88851e29aff7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:10:40 crc kubenswrapper[4814]: I0122 06:10:40.947116 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-kube-api-access-mzhnz" (OuterVolumeSpecName: "kube-api-access-mzhnz") pod "c7c71a2b-ff71-49b6-b5a1-88851e29aff7" (UID: "c7c71a2b-ff71-49b6-b5a1-88851e29aff7"). InnerVolumeSpecName "kube-api-access-mzhnz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:10:40 crc kubenswrapper[4814]: I0122 06:10:40.947857 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c7c71a2b-ff71-49b6-b5a1-88851e29aff7" (UID: "c7c71a2b-ff71-49b6-b5a1-88851e29aff7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.028397 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.028425 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzhnz\" (UniqueName: \"kubernetes.io/projected/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-kube-api-access-mzhnz\") on node \"crc\" DevicePath \"\"" Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.028436 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c7c71a2b-ff71-49b6-b5a1-88851e29aff7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.287994 4814 generic.go:334] "Generic (PLEG): container finished" podID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerID="78a9b7a4789596f7bebdadfe251fefc9b0bf0ec97e38f7b0c1baec4e94f42021" exitCode=0 Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.288038 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cs4l5" event={"ID":"c7c71a2b-ff71-49b6-b5a1-88851e29aff7","Type":"ContainerDied","Data":"78a9b7a4789596f7bebdadfe251fefc9b0bf0ec97e38f7b0c1baec4e94f42021"} Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.288064 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cs4l5" event={"ID":"c7c71a2b-ff71-49b6-b5a1-88851e29aff7","Type":"ContainerDied","Data":"9ab21e37e3ee8c7f2987902ceb8ecb4a112c7c5183056f1bd1c1be845fda01bf"} Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.288086 4814 scope.go:117] "RemoveContainer" containerID="78a9b7a4789596f7bebdadfe251fefc9b0bf0ec97e38f7b0c1baec4e94f42021" Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.288104 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cs4l5" Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.314056 4814 scope.go:117] "RemoveContainer" containerID="ca31ea234558b5d6f9b2725071d156d26de85eb97de76462db3cc16de1806edd" Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.334035 4814 scope.go:117] "RemoveContainer" containerID="1d9fa9523810a79d708d6c97ec61cb7469d37593a4b7c76525d513c8b64bce0c" Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.335360 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cs4l5"] Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.343773 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-cs4l5"] Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.376479 4814 scope.go:117] "RemoveContainer" containerID="78a9b7a4789596f7bebdadfe251fefc9b0bf0ec97e38f7b0c1baec4e94f42021" Jan 22 06:10:41 crc kubenswrapper[4814]: E0122 06:10:41.377817 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78a9b7a4789596f7bebdadfe251fefc9b0bf0ec97e38f7b0c1baec4e94f42021\": container with ID starting with 78a9b7a4789596f7bebdadfe251fefc9b0bf0ec97e38f7b0c1baec4e94f42021 not found: ID does not exist" containerID="78a9b7a4789596f7bebdadfe251fefc9b0bf0ec97e38f7b0c1baec4e94f42021" Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.389198 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78a9b7a4789596f7bebdadfe251fefc9b0bf0ec97e38f7b0c1baec4e94f42021"} err="failed to get container status \"78a9b7a4789596f7bebdadfe251fefc9b0bf0ec97e38f7b0c1baec4e94f42021\": rpc error: code = NotFound desc = could not find container \"78a9b7a4789596f7bebdadfe251fefc9b0bf0ec97e38f7b0c1baec4e94f42021\": container with ID starting with 78a9b7a4789596f7bebdadfe251fefc9b0bf0ec97e38f7b0c1baec4e94f42021 not found: ID does not exist" Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.390127 4814 scope.go:117] "RemoveContainer" containerID="ca31ea234558b5d6f9b2725071d156d26de85eb97de76462db3cc16de1806edd" Jan 22 06:10:41 crc kubenswrapper[4814]: E0122 06:10:41.390758 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca31ea234558b5d6f9b2725071d156d26de85eb97de76462db3cc16de1806edd\": container with ID starting with ca31ea234558b5d6f9b2725071d156d26de85eb97de76462db3cc16de1806edd not found: ID does not exist" containerID="ca31ea234558b5d6f9b2725071d156d26de85eb97de76462db3cc16de1806edd" Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.390803 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca31ea234558b5d6f9b2725071d156d26de85eb97de76462db3cc16de1806edd"} err="failed to get container status \"ca31ea234558b5d6f9b2725071d156d26de85eb97de76462db3cc16de1806edd\": rpc error: code = NotFound desc = could not find container \"ca31ea234558b5d6f9b2725071d156d26de85eb97de76462db3cc16de1806edd\": container with ID starting with ca31ea234558b5d6f9b2725071d156d26de85eb97de76462db3cc16de1806edd not found: ID does not exist" Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.390830 4814 scope.go:117] "RemoveContainer" containerID="1d9fa9523810a79d708d6c97ec61cb7469d37593a4b7c76525d513c8b64bce0c" Jan 22 06:10:41 crc kubenswrapper[4814]: E0122 06:10:41.391144 4814 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"1d9fa9523810a79d708d6c97ec61cb7469d37593a4b7c76525d513c8b64bce0c\": container with ID starting with 1d9fa9523810a79d708d6c97ec61cb7469d37593a4b7c76525d513c8b64bce0c not found: ID does not exist" containerID="1d9fa9523810a79d708d6c97ec61cb7469d37593a4b7c76525d513c8b64bce0c" Jan 22 06:10:41 crc kubenswrapper[4814]: I0122 06:10:41.391273 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d9fa9523810a79d708d6c97ec61cb7469d37593a4b7c76525d513c8b64bce0c"} err="failed to get container status \"1d9fa9523810a79d708d6c97ec61cb7469d37593a4b7c76525d513c8b64bce0c\": rpc error: code = NotFound desc = could not find container \"1d9fa9523810a79d708d6c97ec61cb7469d37593a4b7c76525d513c8b64bce0c\": container with ID starting with 1d9fa9523810a79d708d6c97ec61cb7469d37593a4b7c76525d513c8b64bce0c not found: ID does not exist" Jan 22 06:10:42 crc kubenswrapper[4814]: I0122 06:10:42.356694 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" path="/var/lib/kubelet/pods/c7c71a2b-ff71-49b6-b5a1-88851e29aff7/volumes" Jan 22 06:10:45 crc kubenswrapper[4814]: I0122 06:10:45.344010 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:10:45 crc kubenswrapper[4814]: E0122 06:10:45.344506 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:10:56 crc kubenswrapper[4814]: I0122 06:10:56.347338 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:10:56 crc kubenswrapper[4814]: E0122 06:10:56.348123 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:11:11 crc kubenswrapper[4814]: I0122 06:11:11.343911 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:11:11 crc kubenswrapper[4814]: E0122 06:11:11.344532 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:11:23 crc kubenswrapper[4814]: I0122 06:11:23.344279 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:11:23 crc kubenswrapper[4814]: E0122 06:11:23.345413 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:11:38 crc kubenswrapper[4814]: I0122 06:11:38.344336 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:11:38 crc kubenswrapper[4814]: E0122 06:11:38.345159 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:11:52 crc kubenswrapper[4814]: I0122 06:11:52.344294 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:11:52 crc kubenswrapper[4814]: E0122 06:11:52.345082 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:12:04 crc kubenswrapper[4814]: I0122 06:12:04.350377 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:12:04 crc kubenswrapper[4814]: E0122 06:12:04.351174 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:12:18 crc kubenswrapper[4814]: I0122 06:12:18.345480 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:12:18 crc kubenswrapper[4814]: E0122 06:12:18.347039 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:12:33 crc kubenswrapper[4814]: I0122 06:12:33.343296 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:12:33 crc kubenswrapper[4814]: E0122 06:12:33.344762 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:12:48 crc kubenswrapper[4814]: I0122 06:12:48.344561 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:12:48 crc kubenswrapper[4814]: E0122 06:12:48.345237 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:13:00 crc kubenswrapper[4814]: I0122 06:13:00.345492 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101" Jan 22 06:13:01 crc kubenswrapper[4814]: I0122 06:13:01.592111 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"2a32758657b76610d37069f534a48f88945c31b2fb62507c6e1d7eb88b1dc72d"} Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.527198 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jcbxv"] Jan 22 06:14:05 crc kubenswrapper[4814]: E0122 06:14:05.534763 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="registry-server" Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.534953 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="registry-server" Jan 22 06:14:05 crc kubenswrapper[4814]: E0122 06:14:05.535187 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="extract-content" Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.535219 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="extract-content" Jan 22 06:14:05 crc kubenswrapper[4814]: E0122 06:14:05.535255 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="extract-utilities" Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.535264 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="extract-utilities" Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.536256 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="registry-server" Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.540467 4814 util.go:30] "No sandbox for pod can be found. 
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.527198 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jcbxv"]
Jan 22 06:14:05 crc kubenswrapper[4814]: E0122 06:14:05.534763 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="registry-server"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.534953 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="registry-server"
Jan 22 06:14:05 crc kubenswrapper[4814]: E0122 06:14:05.535187 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="extract-content"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.535219 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="extract-content"
Jan 22 06:14:05 crc kubenswrapper[4814]: E0122 06:14:05.535255 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="extract-utilities"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.535264 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="extract-utilities"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.536256 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7c71a2b-ff71-49b6-b5a1-88851e29aff7" containerName="registry-server"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.540467 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jcbxv"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.679968 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jcbxv"]
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.731396 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsszj\" (UniqueName: \"kubernetes.io/projected/559b7fe5-3253-43b5-9f63-7ff41616f230-kube-api-access-vsszj\") pod \"redhat-operators-jcbxv\" (UID: \"559b7fe5-3253-43b5-9f63-7ff41616f230\") " pod="openshift-marketplace/redhat-operators-jcbxv"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.731464 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/559b7fe5-3253-43b5-9f63-7ff41616f230-utilities\") pod \"redhat-operators-jcbxv\" (UID: \"559b7fe5-3253-43b5-9f63-7ff41616f230\") " pod="openshift-marketplace/redhat-operators-jcbxv"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.731661 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/559b7fe5-3253-43b5-9f63-7ff41616f230-catalog-content\") pod \"redhat-operators-jcbxv\" (UID: \"559b7fe5-3253-43b5-9f63-7ff41616f230\") " pod="openshift-marketplace/redhat-operators-jcbxv"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.833071 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/559b7fe5-3253-43b5-9f63-7ff41616f230-catalog-content\") pod \"redhat-operators-jcbxv\" (UID: \"559b7fe5-3253-43b5-9f63-7ff41616f230\") " pod="openshift-marketplace/redhat-operators-jcbxv"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.833199 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsszj\" (UniqueName: \"kubernetes.io/projected/559b7fe5-3253-43b5-9f63-7ff41616f230-kube-api-access-vsszj\") pod \"redhat-operators-jcbxv\" (UID: \"559b7fe5-3253-43b5-9f63-7ff41616f230\") " pod="openshift-marketplace/redhat-operators-jcbxv"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.833220 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/559b7fe5-3253-43b5-9f63-7ff41616f230-utilities\") pod \"redhat-operators-jcbxv\" (UID: \"559b7fe5-3253-43b5-9f63-7ff41616f230\") " pod="openshift-marketplace/redhat-operators-jcbxv"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.836089 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/559b7fe5-3253-43b5-9f63-7ff41616f230-utilities\") pod \"redhat-operators-jcbxv\" (UID: \"559b7fe5-3253-43b5-9f63-7ff41616f230\") " pod="openshift-marketplace/redhat-operators-jcbxv"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.836501 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/559b7fe5-3253-43b5-9f63-7ff41616f230-catalog-content\") pod \"redhat-operators-jcbxv\" (UID: \"559b7fe5-3253-43b5-9f63-7ff41616f230\") " pod="openshift-marketplace/redhat-operators-jcbxv"
Jan 22 06:14:05 crc kubenswrapper[4814]: I0122 06:14:05.863308 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume
\"kube-api-access-vsszj\" (UniqueName: \"kubernetes.io/projected/559b7fe5-3253-43b5-9f63-7ff41616f230-kube-api-access-vsszj\") pod \"redhat-operators-jcbxv\" (UID: \"559b7fe5-3253-43b5-9f63-7ff41616f230\") " pod="openshift-marketplace/redhat-operators-jcbxv" Jan 22 06:14:06 crc kubenswrapper[4814]: I0122 06:14:06.161868 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jcbxv" Jan 22 06:14:07 crc kubenswrapper[4814]: I0122 06:14:07.257958 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jcbxv"] Jan 22 06:14:08 crc kubenswrapper[4814]: I0122 06:14:08.210359 4814 generic.go:334] "Generic (PLEG): container finished" podID="559b7fe5-3253-43b5-9f63-7ff41616f230" containerID="6803be5265d57c940de52c3b8aee028cb288227ba633496606c949fcf399b992" exitCode=0 Jan 22 06:14:08 crc kubenswrapper[4814]: I0122 06:14:08.210660 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcbxv" event={"ID":"559b7fe5-3253-43b5-9f63-7ff41616f230","Type":"ContainerDied","Data":"6803be5265d57c940de52c3b8aee028cb288227ba633496606c949fcf399b992"} Jan 22 06:14:08 crc kubenswrapper[4814]: I0122 06:14:08.211042 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcbxv" event={"ID":"559b7fe5-3253-43b5-9f63-7ff41616f230","Type":"ContainerStarted","Data":"1ff5953fdf3875a9dc84ad78af8602cfd586cde9c98d4c251040afb70ac84ca1"} Jan 22 06:14:08 crc kubenswrapper[4814]: I0122 06:14:08.220176 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 06:14:10 crc kubenswrapper[4814]: I0122 06:14:10.231264 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcbxv" event={"ID":"559b7fe5-3253-43b5-9f63-7ff41616f230","Type":"ContainerStarted","Data":"d6f50bfb140b0807f767eaeb4f7b54bc25ec78a58af90119902d11268ba384a3"} Jan 22 06:14:15 crc kubenswrapper[4814]: I0122 06:14:15.280515 4814 generic.go:334] "Generic (PLEG): container finished" podID="559b7fe5-3253-43b5-9f63-7ff41616f230" containerID="d6f50bfb140b0807f767eaeb4f7b54bc25ec78a58af90119902d11268ba384a3" exitCode=0 Jan 22 06:14:15 crc kubenswrapper[4814]: I0122 06:14:15.280590 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcbxv" event={"ID":"559b7fe5-3253-43b5-9f63-7ff41616f230","Type":"ContainerDied","Data":"d6f50bfb140b0807f767eaeb4f7b54bc25ec78a58af90119902d11268ba384a3"} Jan 22 06:14:16 crc kubenswrapper[4814]: I0122 06:14:16.289932 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcbxv" event={"ID":"559b7fe5-3253-43b5-9f63-7ff41616f230","Type":"ContainerStarted","Data":"607366ef47bcc3e9dca9c0a0ff31ca46923a075d8fa36a0a8c5ba9b9fcbc9f44"} Jan 22 06:14:26 crc kubenswrapper[4814]: I0122 06:14:26.163343 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jcbxv" Jan 22 06:14:26 crc kubenswrapper[4814]: I0122 06:14:26.164089 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jcbxv" Jan 22 06:14:27 crc kubenswrapper[4814]: I0122 06:14:27.254200 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jcbxv" podUID="559b7fe5-3253-43b5-9f63-7ff41616f230" containerName="registry-server" probeResult="failure" output=< Jan 22 
06:14:27 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s
Jan 22 06:14:27 crc kubenswrapper[4814]: >
Jan 22 06:14:37 crc kubenswrapper[4814]: I0122 06:14:37.225042 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jcbxv" podUID="559b7fe5-3253-43b5-9f63-7ff41616f230" containerName="registry-server" probeResult="failure" output=<
Jan 22 06:14:37 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s
Jan 22 06:14:37 crc kubenswrapper[4814]: >
Jan 22 06:14:46 crc kubenswrapper[4814]: I0122 06:14:46.266024 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jcbxv"
Jan 22 06:14:46 crc kubenswrapper[4814]: I0122 06:14:46.310864 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jcbxv" podStartSLOduration=33.824272052 podStartE2EDuration="41.305697291s" podCreationTimestamp="2026-01-22 06:14:05 +0000 UTC" firstStartedPulling="2026-01-22 06:14:08.217792046 +0000 UTC m=+3334.301280261" lastFinishedPulling="2026-01-22 06:14:15.699217285 +0000 UTC m=+3341.782705500" observedRunningTime="2026-01-22 06:14:16.325013984 +0000 UTC m=+3342.408502209" watchObservedRunningTime="2026-01-22 06:14:46.305697291 +0000 UTC m=+3372.389185516"
Jan 22 06:14:46 crc kubenswrapper[4814]: I0122 06:14:46.333713 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jcbxv"
Jan 22 06:14:46 crc kubenswrapper[4814]: I0122 06:14:46.630779 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jcbxv"]
Jan 22 06:14:47 crc kubenswrapper[4814]: I0122 06:14:47.554725 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jcbxv" podUID="559b7fe5-3253-43b5-9f63-7ff41616f230" containerName="registry-server" containerID="cri-o://607366ef47bcc3e9dca9c0a0ff31ca46923a075d8fa36a0a8c5ba9b9fcbc9f44" gracePeriod=2
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.473333 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jcbxv"
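In the pod_startup_latency_tracker entry above, podStartE2EDuration (41.305697291s) exceeds podStartSLOduration (33.824272052) by exactly the image-pull window: lastFinishedPulling minus firstStartedPulling is 7.481425239 s, i.e. the SLO figure excludes pull time. The monotonic m=+... offsets embedded in the timestamps let you check this without parsing the nanosecond-precision dates; a small sketch, assuming the field layout shown in that entry:

    import re

    PULL = re.compile(r'(firstStartedPulling|lastFinishedPulling)="[^"]*m=\+([\d.]+)"')

    def pull_window(entry):
        """Image-pull seconds for a pod_startup_latency_tracker entry,
        read off the monotonic m=+... clock offsets."""
        t = dict(PULL.findall(entry))
        return float(t['lastFinishedPulling']) - float(t['firstStartedPulling'])

    # For the entry above: 3341.782705500 - 3334.301280261 = 7.481425239,
    # which is also 41.305697291 (E2E) - 33.824272052 (SLO).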
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.563986 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jcbxv"
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.564020 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcbxv" event={"ID":"559b7fe5-3253-43b5-9f63-7ff41616f230","Type":"ContainerDied","Data":"607366ef47bcc3e9dca9c0a0ff31ca46923a075d8fa36a0a8c5ba9b9fcbc9f44"}
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.564221 4814 generic.go:334] "Generic (PLEG): container finished" podID="559b7fe5-3253-43b5-9f63-7ff41616f230" containerID="607366ef47bcc3e9dca9c0a0ff31ca46923a075d8fa36a0a8c5ba9b9fcbc9f44" exitCode=0
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.564421 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcbxv" event={"ID":"559b7fe5-3253-43b5-9f63-7ff41616f230","Type":"ContainerDied","Data":"1ff5953fdf3875a9dc84ad78af8602cfd586cde9c98d4c251040afb70ac84ca1"}
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.565418 4814 scope.go:117] "RemoveContainer" containerID="607366ef47bcc3e9dca9c0a0ff31ca46923a075d8fa36a0a8c5ba9b9fcbc9f44"
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.599159 4814 scope.go:117] "RemoveContainer" containerID="d6f50bfb140b0807f767eaeb4f7b54bc25ec78a58af90119902d11268ba384a3"
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.632916 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/559b7fe5-3253-43b5-9f63-7ff41616f230-catalog-content\") pod \"559b7fe5-3253-43b5-9f63-7ff41616f230\" (UID: \"559b7fe5-3253-43b5-9f63-7ff41616f230\") "
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.633096 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vsszj\" (UniqueName: \"kubernetes.io/projected/559b7fe5-3253-43b5-9f63-7ff41616f230-kube-api-access-vsszj\") pod \"559b7fe5-3253-43b5-9f63-7ff41616f230\" (UID: \"559b7fe5-3253-43b5-9f63-7ff41616f230\") "
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.633173 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/559b7fe5-3253-43b5-9f63-7ff41616f230-utilities\") pod \"559b7fe5-3253-43b5-9f63-7ff41616f230\" (UID: \"559b7fe5-3253-43b5-9f63-7ff41616f230\") "
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.634533 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/559b7fe5-3253-43b5-9f63-7ff41616f230-utilities" (OuterVolumeSpecName: "utilities") pod "559b7fe5-3253-43b5-9f63-7ff41616f230" (UID: "559b7fe5-3253-43b5-9f63-7ff41616f230"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.640301 4814 scope.go:117] "RemoveContainer" containerID="6803be5265d57c940de52c3b8aee028cb288227ba633496606c949fcf399b992"
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.649281 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/559b7fe5-3253-43b5-9f63-7ff41616f230-kube-api-access-vsszj" (OuterVolumeSpecName: "kube-api-access-vsszj") pod "559b7fe5-3253-43b5-9f63-7ff41616f230" (UID: "559b7fe5-3253-43b5-9f63-7ff41616f230"). InnerVolumeSpecName "kube-api-access-vsszj".
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.719291 4814 scope.go:117] "RemoveContainer" containerID="607366ef47bcc3e9dca9c0a0ff31ca46923a075d8fa36a0a8c5ba9b9fcbc9f44" Jan 22 06:14:48 crc kubenswrapper[4814]: E0122 06:14:48.720866 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"607366ef47bcc3e9dca9c0a0ff31ca46923a075d8fa36a0a8c5ba9b9fcbc9f44\": container with ID starting with 607366ef47bcc3e9dca9c0a0ff31ca46923a075d8fa36a0a8c5ba9b9fcbc9f44 not found: ID does not exist" containerID="607366ef47bcc3e9dca9c0a0ff31ca46923a075d8fa36a0a8c5ba9b9fcbc9f44" Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.720960 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"607366ef47bcc3e9dca9c0a0ff31ca46923a075d8fa36a0a8c5ba9b9fcbc9f44"} err="failed to get container status \"607366ef47bcc3e9dca9c0a0ff31ca46923a075d8fa36a0a8c5ba9b9fcbc9f44\": rpc error: code = NotFound desc = could not find container \"607366ef47bcc3e9dca9c0a0ff31ca46923a075d8fa36a0a8c5ba9b9fcbc9f44\": container with ID starting with 607366ef47bcc3e9dca9c0a0ff31ca46923a075d8fa36a0a8c5ba9b9fcbc9f44 not found: ID does not exist" Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.720985 4814 scope.go:117] "RemoveContainer" containerID="d6f50bfb140b0807f767eaeb4f7b54bc25ec78a58af90119902d11268ba384a3" Jan 22 06:14:48 crc kubenswrapper[4814]: E0122 06:14:48.721239 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6f50bfb140b0807f767eaeb4f7b54bc25ec78a58af90119902d11268ba384a3\": container with ID starting with d6f50bfb140b0807f767eaeb4f7b54bc25ec78a58af90119902d11268ba384a3 not found: ID does not exist" containerID="d6f50bfb140b0807f767eaeb4f7b54bc25ec78a58af90119902d11268ba384a3" Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.721262 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6f50bfb140b0807f767eaeb4f7b54bc25ec78a58af90119902d11268ba384a3"} err="failed to get container status \"d6f50bfb140b0807f767eaeb4f7b54bc25ec78a58af90119902d11268ba384a3\": rpc error: code = NotFound desc = could not find container \"d6f50bfb140b0807f767eaeb4f7b54bc25ec78a58af90119902d11268ba384a3\": container with ID starting with d6f50bfb140b0807f767eaeb4f7b54bc25ec78a58af90119902d11268ba384a3 not found: ID does not exist" Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.721277 4814 scope.go:117] "RemoveContainer" containerID="6803be5265d57c940de52c3b8aee028cb288227ba633496606c949fcf399b992" Jan 22 06:14:48 crc kubenswrapper[4814]: E0122 06:14:48.722018 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6803be5265d57c940de52c3b8aee028cb288227ba633496606c949fcf399b992\": container with ID starting with 6803be5265d57c940de52c3b8aee028cb288227ba633496606c949fcf399b992 not found: ID does not exist" containerID="6803be5265d57c940de52c3b8aee028cb288227ba633496606c949fcf399b992" Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.722043 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6803be5265d57c940de52c3b8aee028cb288227ba633496606c949fcf399b992"} err="failed to get container status \"6803be5265d57c940de52c3b8aee028cb288227ba633496606c949fcf399b992\": rpc error: code = NotFound desc = could not 
find container \"6803be5265d57c940de52c3b8aee028cb288227ba633496606c949fcf399b992\": container with ID starting with 6803be5265d57c940de52c3b8aee028cb288227ba633496606c949fcf399b992 not found: ID does not exist"
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.736914 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vsszj\" (UniqueName: \"kubernetes.io/projected/559b7fe5-3253-43b5-9f63-7ff41616f230-kube-api-access-vsszj\") on node \"crc\" DevicePath \"\""
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.736943 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/559b7fe5-3253-43b5-9f63-7ff41616f230-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.757374 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/559b7fe5-3253-43b5-9f63-7ff41616f230-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "559b7fe5-3253-43b5-9f63-7ff41616f230" (UID: "559b7fe5-3253-43b5-9f63-7ff41616f230"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.845213 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/559b7fe5-3253-43b5-9f63-7ff41616f230-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.919567 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jcbxv"]
Jan 22 06:14:48 crc kubenswrapper[4814]: I0122 06:14:48.928933 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jcbxv"]
Jan 22 06:14:50 crc kubenswrapper[4814]: I0122 06:14:50.357884 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="559b7fe5-3253-43b5-9f63-7ff41616f230" path="/var/lib/kubelet/pods/559b7fe5-3253-43b5-9f63-7ff41616f230/volumes"
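The three "ContainerStatus from runtime service failed ... NotFound" errors in the teardown above are benign: the kubelet re-issues RemoveContainer for container IDs (extract-utilities, extract-content, registry-server) that CRI-O has already deleted, so the follow-up status query can only report NotFound. The pod's whole lifecycle is recoverable from the "SyncLoop (PLEG)" entries alone; a hedged sketch, assuming one entry per line as above:

    import json
    import re

    PLEG = re.compile(r'"SyncLoop \(PLEG\): event for pod" pod="([^"]+)" event=({.*?})')

    def pleg_events(lines):
        """Yield (pod, event-type, container-or-sandbox id) from PLEG
        entries; the event payload is plain JSON with no nested braces."""
        for line in lines:
            m = PLEG.search(line)
            if m:
                ev = json.loads(m.group(2))
                yield m.group(1), ev['Type'], ev['Data']

Replaying these for redhat-operators-jcbxv reproduces the sequence above: the sandbox coming up, a ContainerStarted/ContainerDied pair for each extract step, registry-server starting, and the final ContainerDied events at deletion.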
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.586205 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5"]
Jan 22 06:15:00 crc kubenswrapper[4814]: E0122 06:15:00.588790 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="559b7fe5-3253-43b5-9f63-7ff41616f230" containerName="extract-utilities"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.588855 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="559b7fe5-3253-43b5-9f63-7ff41616f230" containerName="extract-utilities"
Jan 22 06:15:00 crc kubenswrapper[4814]: E0122 06:15:00.588883 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="559b7fe5-3253-43b5-9f63-7ff41616f230" containerName="extract-content"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.588896 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="559b7fe5-3253-43b5-9f63-7ff41616f230" containerName="extract-content"
Jan 22 06:15:00 crc kubenswrapper[4814]: E0122 06:15:00.588937 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="559b7fe5-3253-43b5-9f63-7ff41616f230" containerName="registry-server"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.588949 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="559b7fe5-3253-43b5-9f63-7ff41616f230" containerName="registry-server"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.589558 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="559b7fe5-3253-43b5-9f63-7ff41616f230" containerName="registry-server"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.591937 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.595168 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.596301 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.699106 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5"]
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.775785 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r85m5\" (UniqueName: \"kubernetes.io/projected/9808200d-e1b4-4644-80f3-af964fcdd471-kube-api-access-r85m5\") pod \"collect-profiles-29484375-5zqr5\" (UID: \"9808200d-e1b4-4644-80f3-af964fcdd471\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.775871 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9808200d-e1b4-4644-80f3-af964fcdd471-config-volume\") pod \"collect-profiles-29484375-5zqr5\" (UID: \"9808200d-e1b4-4644-80f3-af964fcdd471\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.775894 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9808200d-e1b4-4644-80f3-af964fcdd471-secret-volume\") pod \"collect-profiles-29484375-5zqr5\" (UID: \"9808200d-e1b4-4644-80f3-af964fcdd471\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.877925 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r85m5\" (UniqueName: \"kubernetes.io/projected/9808200d-e1b4-4644-80f3-af964fcdd471-kube-api-access-r85m5\") pod \"collect-profiles-29484375-5zqr5\" (UID: \"9808200d-e1b4-4644-80f3-af964fcdd471\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.878039 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9808200d-e1b4-4644-80f3-af964fcdd471-config-volume\") pod \"collect-profiles-29484375-5zqr5\" (UID: \"9808200d-e1b4-4644-80f3-af964fcdd471\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.878063 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9808200d-e1b4-4644-80f3-af964fcdd471-secret-volume\") pod \"collect-profiles-29484375-5zqr5\" (UID: \"9808200d-e1b4-4644-80f3-af964fcdd471\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5"
Jan 22 06:15:00 crc kubenswrapper[4814]: I0122
06:15:00.883218 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9808200d-e1b4-4644-80f3-af964fcdd471-config-volume\") pod \"collect-profiles-29484375-5zqr5\" (UID: \"9808200d-e1b4-4644-80f3-af964fcdd471\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5" Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.899509 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r85m5\" (UniqueName: \"kubernetes.io/projected/9808200d-e1b4-4644-80f3-af964fcdd471-kube-api-access-r85m5\") pod \"collect-profiles-29484375-5zqr5\" (UID: \"9808200d-e1b4-4644-80f3-af964fcdd471\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5" Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.899800 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9808200d-e1b4-4644-80f3-af964fcdd471-secret-volume\") pod \"collect-profiles-29484375-5zqr5\" (UID: \"9808200d-e1b4-4644-80f3-af964fcdd471\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5" Jan 22 06:15:00 crc kubenswrapper[4814]: I0122 06:15:00.914368 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5" Jan 22 06:15:01 crc kubenswrapper[4814]: I0122 06:15:01.822672 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5"] Jan 22 06:15:02 crc kubenswrapper[4814]: I0122 06:15:02.701750 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5" event={"ID":"9808200d-e1b4-4644-80f3-af964fcdd471","Type":"ContainerStarted","Data":"4cfcd740a79acb5231250b6f608754dbe65d82b441be5a90ff30536e25347e85"} Jan 22 06:15:02 crc kubenswrapper[4814]: I0122 06:15:02.702160 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5" event={"ID":"9808200d-e1b4-4644-80f3-af964fcdd471","Type":"ContainerStarted","Data":"0fa7559ee11e7d626974c585e4005acfb1e7e6714ad4ab485886fc2e13546cc4"} Jan 22 06:15:02 crc kubenswrapper[4814]: I0122 06:15:02.784131 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5" podStartSLOduration=2.78381473 podStartE2EDuration="2.78381473s" podCreationTimestamp="2026-01-22 06:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:15:02.766544822 +0000 UTC m=+3388.850033097" watchObservedRunningTime="2026-01-22 06:15:02.78381473 +0000 UTC m=+3388.867302975" Jan 22 06:15:03 crc kubenswrapper[4814]: I0122 06:15:03.711491 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5" event={"ID":"9808200d-e1b4-4644-80f3-af964fcdd471","Type":"ContainerDied","Data":"4cfcd740a79acb5231250b6f608754dbe65d82b441be5a90ff30536e25347e85"} Jan 22 06:15:03 crc kubenswrapper[4814]: I0122 06:15:03.711796 4814 generic.go:334] "Generic (PLEG): container finished" podID="9808200d-e1b4-4644-80f3-af964fcdd471" containerID="4cfcd740a79acb5231250b6f608754dbe65d82b441be5a90ff30536e25347e85" exitCode=0 Jan 22 06:15:05 crc kubenswrapper[4814]: I0122 
06:15:05.388455 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5" Jan 22 06:15:05 crc kubenswrapper[4814]: I0122 06:15:05.570579 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9808200d-e1b4-4644-80f3-af964fcdd471-secret-volume\") pod \"9808200d-e1b4-4644-80f3-af964fcdd471\" (UID: \"9808200d-e1b4-4644-80f3-af964fcdd471\") " Jan 22 06:15:05 crc kubenswrapper[4814]: I0122 06:15:05.570694 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9808200d-e1b4-4644-80f3-af964fcdd471-config-volume\") pod \"9808200d-e1b4-4644-80f3-af964fcdd471\" (UID: \"9808200d-e1b4-4644-80f3-af964fcdd471\") " Jan 22 06:15:05 crc kubenswrapper[4814]: I0122 06:15:05.570740 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r85m5\" (UniqueName: \"kubernetes.io/projected/9808200d-e1b4-4644-80f3-af964fcdd471-kube-api-access-r85m5\") pod \"9808200d-e1b4-4644-80f3-af964fcdd471\" (UID: \"9808200d-e1b4-4644-80f3-af964fcdd471\") " Jan 22 06:15:05 crc kubenswrapper[4814]: I0122 06:15:05.572373 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9808200d-e1b4-4644-80f3-af964fcdd471-config-volume" (OuterVolumeSpecName: "config-volume") pod "9808200d-e1b4-4644-80f3-af964fcdd471" (UID: "9808200d-e1b4-4644-80f3-af964fcdd471"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:15:05 crc kubenswrapper[4814]: I0122 06:15:05.591940 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9808200d-e1b4-4644-80f3-af964fcdd471-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9808200d-e1b4-4644-80f3-af964fcdd471" (UID: "9808200d-e1b4-4644-80f3-af964fcdd471"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:15:05 crc kubenswrapper[4814]: I0122 06:15:05.596687 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9808200d-e1b4-4644-80f3-af964fcdd471-kube-api-access-r85m5" (OuterVolumeSpecName: "kube-api-access-r85m5") pod "9808200d-e1b4-4644-80f3-af964fcdd471" (UID: "9808200d-e1b4-4644-80f3-af964fcdd471"). InnerVolumeSpecName "kube-api-access-r85m5". 
PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:15:05 crc kubenswrapper[4814]: I0122 06:15:05.672696 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r85m5\" (UniqueName: \"kubernetes.io/projected/9808200d-e1b4-4644-80f3-af964fcdd471-kube-api-access-r85m5\") on node \"crc\" DevicePath \"\""
Jan 22 06:15:05 crc kubenswrapper[4814]: I0122 06:15:05.672731 4814 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9808200d-e1b4-4644-80f3-af964fcdd471-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 22 06:15:05 crc kubenswrapper[4814]: I0122 06:15:05.672741 4814 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9808200d-e1b4-4644-80f3-af964fcdd471-config-volume\") on node \"crc\" DevicePath \"\""
Jan 22 06:15:05 crc kubenswrapper[4814]: I0122 06:15:05.743742 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5" event={"ID":"9808200d-e1b4-4644-80f3-af964fcdd471","Type":"ContainerDied","Data":"0fa7559ee11e7d626974c585e4005acfb1e7e6714ad4ab485886fc2e13546cc4"}
Jan 22 06:15:05 crc kubenswrapper[4814]: I0122 06:15:05.743940 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5"
Jan 22 06:15:05 crc kubenswrapper[4814]: I0122 06:15:05.743955 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0fa7559ee11e7d626974c585e4005acfb1e7e6714ad4ab485886fc2e13546cc4"
Jan 22 06:15:06 crc kubenswrapper[4814]: I0122 06:15:06.490015 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb"]
Jan 22 06:15:06 crc kubenswrapper[4814]: I0122 06:15:06.498304 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484330-jq5wb"]
Jan 22 06:15:08 crc kubenswrapper[4814]: I0122 06:15:08.358375 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e12e9b03-1d35-47a8-a8e8-47899f80a4f2" path="/var/lib/kubelet/pods/e12e9b03-1d35-47a8-a8e8-47899f80a4f2/volumes"
Jan 22 06:15:19 crc kubenswrapper[4814]: I0122 06:15:19.614301 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:15:19 crc kubenswrapper[4814]: I0122 06:15:19.615333 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
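This liveness probe against http://127.0.0.1:8798/health fails again at 06:15:49 and 06:16:19 below, i.e. on a 30 s period, and only after the third consecutive failure does the kubelet kill the container ("Container machine-config-daemon failed liveness probe, will be restarted" at 06:16:19). That matches the Kubernetes default failureThreshold of 3, although the pod spec itself is not part of this log. A counting sketch over per-entry lines:

    def liveness_failures(lines):
        """Count "Probe failed" entries with probeType="Liveness" per pod."""
        counts = {}
        for line in lines:
            if 'Probe failed' in line and 'probeType="Liveness"' in line:
                pod = line.split('pod="', 1)[1].split('"', 1)[0]
                counts[pod] = counts.get(pod, 0) + 1
        return counts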
Jan 22 06:15:37 crc kubenswrapper[4814]: I0122 06:15:37.976743 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7hzb9"]
Jan 22 06:15:37 crc kubenswrapper[4814]: E0122 06:15:37.978350 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9808200d-e1b4-4644-80f3-af964fcdd471" containerName="collect-profiles"
Jan 22 06:15:37 crc kubenswrapper[4814]: I0122 06:15:37.978368 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="9808200d-e1b4-4644-80f3-af964fcdd471" containerName="collect-profiles"
Jan 22 06:15:37 crc kubenswrapper[4814]: I0122 06:15:37.979278 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="9808200d-e1b4-4644-80f3-af964fcdd471" containerName="collect-profiles"
Jan 22 06:15:37 crc kubenswrapper[4814]: I0122 06:15:37.984011 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7hzb9"
Jan 22 06:15:37 crc kubenswrapper[4814]: I0122 06:15:37.999153 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7hzb9"]
Jan 22 06:15:38 crc kubenswrapper[4814]: I0122 06:15:38.103966 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-utilities\") pod \"community-operators-7hzb9\" (UID: \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\") " pod="openshift-marketplace/community-operators-7hzb9"
Jan 22 06:15:38 crc kubenswrapper[4814]: I0122 06:15:38.104186 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-catalog-content\") pod \"community-operators-7hzb9\" (UID: \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\") " pod="openshift-marketplace/community-operators-7hzb9"
Jan 22 06:15:38 crc kubenswrapper[4814]: I0122 06:15:38.104345 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlc5h\" (UniqueName: \"kubernetes.io/projected/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-kube-api-access-hlc5h\") pod \"community-operators-7hzb9\" (UID: \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\") " pod="openshift-marketplace/community-operators-7hzb9"
Jan 22 06:15:38 crc kubenswrapper[4814]: I0122 06:15:38.206597 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-catalog-content\") pod \"community-operators-7hzb9\" (UID: \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\") " pod="openshift-marketplace/community-operators-7hzb9"
Jan 22 06:15:38 crc kubenswrapper[4814]: I0122 06:15:38.207190 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlc5h\" (UniqueName: \"kubernetes.io/projected/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-kube-api-access-hlc5h\") pod \"community-operators-7hzb9\" (UID: \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\") " pod="openshift-marketplace/community-operators-7hzb9"
Jan 22 06:15:38 crc kubenswrapper[4814]: I0122 06:15:38.207408 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-utilities\") pod \"community-operators-7hzb9\" (UID: \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\") " pod="openshift-marketplace/community-operators-7hzb9"
Jan 22 06:15:38 crc kubenswrapper[4814]: I0122 06:15:38.208533 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-catalog-content\") pod \"community-operators-7hzb9\" (UID: \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\") " pod="openshift-marketplace/community-operators-7hzb9"
Jan 22 06:15:38 crc kubenswrapper[4814]: I0122 06:15:38.208923 4814
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-utilities\") pod \"community-operators-7hzb9\" (UID: \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\") " pod="openshift-marketplace/community-operators-7hzb9" Jan 22 06:15:38 crc kubenswrapper[4814]: I0122 06:15:38.241021 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlc5h\" (UniqueName: \"kubernetes.io/projected/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-kube-api-access-hlc5h\") pod \"community-operators-7hzb9\" (UID: \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\") " pod="openshift-marketplace/community-operators-7hzb9" Jan 22 06:15:38 crc kubenswrapper[4814]: I0122 06:15:38.313226 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7hzb9" Jan 22 06:15:39 crc kubenswrapper[4814]: I0122 06:15:39.188554 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7hzb9"] Jan 22 06:15:39 crc kubenswrapper[4814]: I0122 06:15:39.491162 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7hzb9" event={"ID":"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc","Type":"ContainerStarted","Data":"1af01b4befb3fffde4e14c74fd7efd49665b2c6900547feaebbc6f253de2516f"} Jan 22 06:15:39 crc kubenswrapper[4814]: I0122 06:15:39.491410 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7hzb9" event={"ID":"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc","Type":"ContainerStarted","Data":"b5c380ae21c9e39252ff683981566659ebd6934ce30fd6825b7e77b01d5c5555"} Jan 22 06:15:40 crc kubenswrapper[4814]: I0122 06:15:40.500385 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7hzb9" event={"ID":"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc","Type":"ContainerDied","Data":"1af01b4befb3fffde4e14c74fd7efd49665b2c6900547feaebbc6f253de2516f"} Jan 22 06:15:40 crc kubenswrapper[4814]: I0122 06:15:40.500736 4814 generic.go:334] "Generic (PLEG): container finished" podID="eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc" containerID="1af01b4befb3fffde4e14c74fd7efd49665b2c6900547feaebbc6f253de2516f" exitCode=0 Jan 22 06:15:41 crc kubenswrapper[4814]: I0122 06:15:41.510648 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7hzb9" event={"ID":"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc","Type":"ContainerStarted","Data":"087121a9e090f6a6c9579ffda7afb79bc27dbf50f44f46a98cb11ce5dc690518"} Jan 22 06:15:42 crc kubenswrapper[4814]: I0122 06:15:42.523437 4814 generic.go:334] "Generic (PLEG): container finished" podID="eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc" containerID="087121a9e090f6a6c9579ffda7afb79bc27dbf50f44f46a98cb11ce5dc690518" exitCode=0 Jan 22 06:15:42 crc kubenswrapper[4814]: I0122 06:15:42.523528 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7hzb9" event={"ID":"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc","Type":"ContainerDied","Data":"087121a9e090f6a6c9579ffda7afb79bc27dbf50f44f46a98cb11ce5dc690518"} Jan 22 06:15:43 crc kubenswrapper[4814]: I0122 06:15:43.533177 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7hzb9" event={"ID":"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc","Type":"ContainerStarted","Data":"0d7f1cb6350874bf3a6cfd85b0ac76dec4de32c8943e1eb5786165e8fc7145d3"} Jan 22 06:15:43 crc 
kubenswrapper[4814]: I0122 06:15:43.554091 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7hzb9" podStartSLOduration=4.131427454 podStartE2EDuration="6.553605579s" podCreationTimestamp="2026-01-22 06:15:37 +0000 UTC" firstStartedPulling="2026-01-22 06:15:40.502084934 +0000 UTC m=+3426.585573149" lastFinishedPulling="2026-01-22 06:15:42.924263059 +0000 UTC m=+3429.007751274" observedRunningTime="2026-01-22 06:15:43.551136562 +0000 UTC m=+3429.634624777" watchObservedRunningTime="2026-01-22 06:15:43.553605579 +0000 UTC m=+3429.637093794" Jan 22 06:15:44 crc kubenswrapper[4814]: I0122 06:15:44.019196 4814 scope.go:117] "RemoveContainer" containerID="6b0382515290bcd633eae672ef4f6d930f86e16eaebd5e5f2cfcd55bcf5a40b3" Jan 22 06:15:48 crc kubenswrapper[4814]: I0122 06:15:48.313879 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7hzb9" Jan 22 06:15:48 crc kubenswrapper[4814]: I0122 06:15:48.314467 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7hzb9" Jan 22 06:15:49 crc kubenswrapper[4814]: I0122 06:15:49.362921 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-7hzb9" podUID="eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc" containerName="registry-server" probeResult="failure" output=< Jan 22 06:15:49 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 06:15:49 crc kubenswrapper[4814]: > Jan 22 06:15:49 crc kubenswrapper[4814]: I0122 06:15:49.613588 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:15:49 crc kubenswrapper[4814]: I0122 06:15:49.613669 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:15:58 crc kubenswrapper[4814]: I0122 06:15:58.372216 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7hzb9" Jan 22 06:15:58 crc kubenswrapper[4814]: I0122 06:15:58.434236 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7hzb9" Jan 22 06:15:58 crc kubenswrapper[4814]: I0122 06:15:58.618096 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7hzb9"] Jan 22 06:15:59 crc kubenswrapper[4814]: I0122 06:15:59.664375 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7hzb9" podUID="eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc" containerName="registry-server" containerID="cri-o://0d7f1cb6350874bf3a6cfd85b0ac76dec4de32c8943e1eb5786165e8fc7145d3" gracePeriod=2 Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.551126 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7hzb9" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.676998 4814 generic.go:334] "Generic (PLEG): container finished" podID="eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc" containerID="0d7f1cb6350874bf3a6cfd85b0ac76dec4de32c8943e1eb5786165e8fc7145d3" exitCode=0 Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.677051 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7hzb9" event={"ID":"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc","Type":"ContainerDied","Data":"0d7f1cb6350874bf3a6cfd85b0ac76dec4de32c8943e1eb5786165e8fc7145d3"} Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.677077 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7hzb9" event={"ID":"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc","Type":"ContainerDied","Data":"b5c380ae21c9e39252ff683981566659ebd6934ce30fd6825b7e77b01d5c5555"} Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.677097 4814 scope.go:117] "RemoveContainer" containerID="0d7f1cb6350874bf3a6cfd85b0ac76dec4de32c8943e1eb5786165e8fc7145d3" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.678074 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7hzb9" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.714616 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-catalog-content\") pod \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\" (UID: \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\") " Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.714950 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlc5h\" (UniqueName: \"kubernetes.io/projected/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-kube-api-access-hlc5h\") pod \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\" (UID: \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\") " Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.714978 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-utilities\") pod \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\" (UID: \"eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc\") " Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.717113 4814 scope.go:117] "RemoveContainer" containerID="087121a9e090f6a6c9579ffda7afb79bc27dbf50f44f46a98cb11ce5dc690518" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.716857 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-utilities" (OuterVolumeSpecName: "utilities") pod "eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc" (UID: "eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.732996 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-kube-api-access-hlc5h" (OuterVolumeSpecName: "kube-api-access-hlc5h") pod "eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc" (UID: "eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc"). InnerVolumeSpecName "kube-api-access-hlc5h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.768901 4814 scope.go:117] "RemoveContainer" containerID="1af01b4befb3fffde4e14c74fd7efd49665b2c6900547feaebbc6f253de2516f" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.802443 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc" (UID: "eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.818904 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.818940 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlc5h\" (UniqueName: \"kubernetes.io/projected/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-kube-api-access-hlc5h\") on node \"crc\" DevicePath \"\"" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.818952 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.836396 4814 scope.go:117] "RemoveContainer" containerID="0d7f1cb6350874bf3a6cfd85b0ac76dec4de32c8943e1eb5786165e8fc7145d3" Jan 22 06:16:00 crc kubenswrapper[4814]: E0122 06:16:00.837778 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d7f1cb6350874bf3a6cfd85b0ac76dec4de32c8943e1eb5786165e8fc7145d3\": container with ID starting with 0d7f1cb6350874bf3a6cfd85b0ac76dec4de32c8943e1eb5786165e8fc7145d3 not found: ID does not exist" containerID="0d7f1cb6350874bf3a6cfd85b0ac76dec4de32c8943e1eb5786165e8fc7145d3" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.837895 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d7f1cb6350874bf3a6cfd85b0ac76dec4de32c8943e1eb5786165e8fc7145d3"} err="failed to get container status \"0d7f1cb6350874bf3a6cfd85b0ac76dec4de32c8943e1eb5786165e8fc7145d3\": rpc error: code = NotFound desc = could not find container \"0d7f1cb6350874bf3a6cfd85b0ac76dec4de32c8943e1eb5786165e8fc7145d3\": container with ID starting with 0d7f1cb6350874bf3a6cfd85b0ac76dec4de32c8943e1eb5786165e8fc7145d3 not found: ID does not exist" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.837927 4814 scope.go:117] "RemoveContainer" containerID="087121a9e090f6a6c9579ffda7afb79bc27dbf50f44f46a98cb11ce5dc690518" Jan 22 06:16:00 crc kubenswrapper[4814]: E0122 06:16:00.838306 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"087121a9e090f6a6c9579ffda7afb79bc27dbf50f44f46a98cb11ce5dc690518\": container with ID starting with 087121a9e090f6a6c9579ffda7afb79bc27dbf50f44f46a98cb11ce5dc690518 not found: ID does not exist" containerID="087121a9e090f6a6c9579ffda7afb79bc27dbf50f44f46a98cb11ce5dc690518" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.838335 4814 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"087121a9e090f6a6c9579ffda7afb79bc27dbf50f44f46a98cb11ce5dc690518"} err="failed to get container status \"087121a9e090f6a6c9579ffda7afb79bc27dbf50f44f46a98cb11ce5dc690518\": rpc error: code = NotFound desc = could not find container \"087121a9e090f6a6c9579ffda7afb79bc27dbf50f44f46a98cb11ce5dc690518\": container with ID starting with 087121a9e090f6a6c9579ffda7afb79bc27dbf50f44f46a98cb11ce5dc690518 not found: ID does not exist" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.838353 4814 scope.go:117] "RemoveContainer" containerID="1af01b4befb3fffde4e14c74fd7efd49665b2c6900547feaebbc6f253de2516f" Jan 22 06:16:00 crc kubenswrapper[4814]: E0122 06:16:00.838667 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1af01b4befb3fffde4e14c74fd7efd49665b2c6900547feaebbc6f253de2516f\": container with ID starting with 1af01b4befb3fffde4e14c74fd7efd49665b2c6900547feaebbc6f253de2516f not found: ID does not exist" containerID="1af01b4befb3fffde4e14c74fd7efd49665b2c6900547feaebbc6f253de2516f" Jan 22 06:16:00 crc kubenswrapper[4814]: I0122 06:16:00.838781 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1af01b4befb3fffde4e14c74fd7efd49665b2c6900547feaebbc6f253de2516f"} err="failed to get container status \"1af01b4befb3fffde4e14c74fd7efd49665b2c6900547feaebbc6f253de2516f\": rpc error: code = NotFound desc = could not find container \"1af01b4befb3fffde4e14c74fd7efd49665b2c6900547feaebbc6f253de2516f\": container with ID starting with 1af01b4befb3fffde4e14c74fd7efd49665b2c6900547feaebbc6f253de2516f not found: ID does not exist" Jan 22 06:16:01 crc kubenswrapper[4814]: I0122 06:16:01.018810 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7hzb9"] Jan 22 06:16:01 crc kubenswrapper[4814]: I0122 06:16:01.049017 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7hzb9"] Jan 22 06:16:02 crc kubenswrapper[4814]: I0122 06:16:02.356479 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc" path="/var/lib/kubelet/pods/eb3dd5f3-f0b7-4f7c-80a9-a02a81d394fc/volumes" Jan 22 06:16:19 crc kubenswrapper[4814]: I0122 06:16:19.613984 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:16:19 crc kubenswrapper[4814]: I0122 06:16:19.614558 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:16:19 crc kubenswrapper[4814]: I0122 06:16:19.614608 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 06:16:19 crc kubenswrapper[4814]: I0122 06:16:19.615392 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2a32758657b76610d37069f534a48f88945c31b2fb62507c6e1d7eb88b1dc72d"} 
pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 06:16:19 crc kubenswrapper[4814]: I0122 06:16:19.615445 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://2a32758657b76610d37069f534a48f88945c31b2fb62507c6e1d7eb88b1dc72d" gracePeriod=600
Jan 22 06:16:19 crc kubenswrapper[4814]: I0122 06:16:19.841025 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="2a32758657b76610d37069f534a48f88945c31b2fb62507c6e1d7eb88b1dc72d" exitCode=0
Jan 22 06:16:19 crc kubenswrapper[4814]: I0122 06:16:19.841445 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"2a32758657b76610d37069f534a48f88945c31b2fb62507c6e1d7eb88b1dc72d"}
Jan 22 06:16:19 crc kubenswrapper[4814]: I0122 06:16:19.841557 4814 scope.go:117] "RemoveContainer" containerID="ad6fe2e3db49c2eabbc6026fbeccdeb32e4d54622a3e1298cc1f01acc4a53101"
Jan 22 06:16:20 crc kubenswrapper[4814]: I0122 06:16:20.849596 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1"}
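Two very different grace periods appear in this log: the marketplace registry-server containers were killed with gracePeriod=2, while the liveness-triggered restart above uses gracePeriod=600. The value comes from the pod's termination grace period (the pod specs themselves are not in this log); the runtime sends SIGTERM and escalates to SIGKILL once the period expires, so machine-config-daemon gets up to ten minutes to shut down cleanly. A hedged one-regex extraction over the "Killing container with a grace period" entries:

    import re

    KILL = re.compile(r'"Killing container with a grace period" pod="([^"]+)"'
                      r'.*?containerName="([^"]+)".*?gracePeriod=(\d+)')

    def grace_periods(lines):
        """(pod, container, grace seconds) for every kill entry."""
        return [(m.group(1), m.group(2), int(m.group(3)))
                for m in map(KILL.search, lines) if m]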
Need to start a new one" pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:15 crc kubenswrapper[4814]: I0122 06:17:15.402137 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-899ws"] Jan 22 06:17:15 crc kubenswrapper[4814]: I0122 06:17:15.432520 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/971233fd-cd3e-48d4-bd4e-48868a6438ee-catalog-content\") pod \"certified-operators-899ws\" (UID: \"971233fd-cd3e-48d4-bd4e-48868a6438ee\") " pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:15 crc kubenswrapper[4814]: I0122 06:17:15.432863 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsbn8\" (UniqueName: \"kubernetes.io/projected/971233fd-cd3e-48d4-bd4e-48868a6438ee-kube-api-access-nsbn8\") pod \"certified-operators-899ws\" (UID: \"971233fd-cd3e-48d4-bd4e-48868a6438ee\") " pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:15 crc kubenswrapper[4814]: I0122 06:17:15.433335 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/971233fd-cd3e-48d4-bd4e-48868a6438ee-utilities\") pod \"certified-operators-899ws\" (UID: \"971233fd-cd3e-48d4-bd4e-48868a6438ee\") " pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:15 crc kubenswrapper[4814]: I0122 06:17:15.535204 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsbn8\" (UniqueName: \"kubernetes.io/projected/971233fd-cd3e-48d4-bd4e-48868a6438ee-kube-api-access-nsbn8\") pod \"certified-operators-899ws\" (UID: \"971233fd-cd3e-48d4-bd4e-48868a6438ee\") " pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:15 crc kubenswrapper[4814]: I0122 06:17:15.535415 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/971233fd-cd3e-48d4-bd4e-48868a6438ee-utilities\") pod \"certified-operators-899ws\" (UID: \"971233fd-cd3e-48d4-bd4e-48868a6438ee\") " pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:15 crc kubenswrapper[4814]: I0122 06:17:15.535501 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/971233fd-cd3e-48d4-bd4e-48868a6438ee-catalog-content\") pod \"certified-operators-899ws\" (UID: \"971233fd-cd3e-48d4-bd4e-48868a6438ee\") " pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:15 crc kubenswrapper[4814]: I0122 06:17:15.535945 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/971233fd-cd3e-48d4-bd4e-48868a6438ee-utilities\") pod \"certified-operators-899ws\" (UID: \"971233fd-cd3e-48d4-bd4e-48868a6438ee\") " pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:15 crc kubenswrapper[4814]: I0122 06:17:15.536033 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/971233fd-cd3e-48d4-bd4e-48868a6438ee-catalog-content\") pod \"certified-operators-899ws\" (UID: \"971233fd-cd3e-48d4-bd4e-48868a6438ee\") " pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:15 crc kubenswrapper[4814]: I0122 06:17:15.557807 4814 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-nsbn8\" (UniqueName: \"kubernetes.io/projected/971233fd-cd3e-48d4-bd4e-48868a6438ee-kube-api-access-nsbn8\") pod \"certified-operators-899ws\" (UID: \"971233fd-cd3e-48d4-bd4e-48868a6438ee\") " pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:15 crc kubenswrapper[4814]: I0122 06:17:15.732406 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:16 crc kubenswrapper[4814]: I0122 06:17:16.287379 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-899ws"] Jan 22 06:17:16 crc kubenswrapper[4814]: I0122 06:17:16.371386 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-899ws" event={"ID":"971233fd-cd3e-48d4-bd4e-48868a6438ee","Type":"ContainerStarted","Data":"a2a2e1d1cdd97997836efd2a2b4a2e06e6c1edde781e68ea3822dbf797d6fb18"} Jan 22 06:17:17 crc kubenswrapper[4814]: I0122 06:17:17.385139 4814 generic.go:334] "Generic (PLEG): container finished" podID="971233fd-cd3e-48d4-bd4e-48868a6438ee" containerID="2210fb51596eb8020504231b4ca6f653f07eb6ab857c13e9d3d82720221748cf" exitCode=0 Jan 22 06:17:17 crc kubenswrapper[4814]: I0122 06:17:17.385193 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-899ws" event={"ID":"971233fd-cd3e-48d4-bd4e-48868a6438ee","Type":"ContainerDied","Data":"2210fb51596eb8020504231b4ca6f653f07eb6ab857c13e9d3d82720221748cf"} Jan 22 06:17:18 crc kubenswrapper[4814]: I0122 06:17:18.394418 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-899ws" event={"ID":"971233fd-cd3e-48d4-bd4e-48868a6438ee","Type":"ContainerStarted","Data":"d3260a76fa6ff4823dd39804c2c21a3645b150b66e84d05efa114a7649c0ed96"} Jan 22 06:17:20 crc kubenswrapper[4814]: I0122 06:17:20.411937 4814 generic.go:334] "Generic (PLEG): container finished" podID="971233fd-cd3e-48d4-bd4e-48868a6438ee" containerID="d3260a76fa6ff4823dd39804c2c21a3645b150b66e84d05efa114a7649c0ed96" exitCode=0 Jan 22 06:17:20 crc kubenswrapper[4814]: I0122 06:17:20.412006 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-899ws" event={"ID":"971233fd-cd3e-48d4-bd4e-48868a6438ee","Type":"ContainerDied","Data":"d3260a76fa6ff4823dd39804c2c21a3645b150b66e84d05efa114a7649c0ed96"} Jan 22 06:17:21 crc kubenswrapper[4814]: I0122 06:17:21.423611 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-899ws" event={"ID":"971233fd-cd3e-48d4-bd4e-48868a6438ee","Type":"ContainerStarted","Data":"7913552841b9820f4e39bd3b94e4d0c2f559bb19bfb1d8c25e9697cb8069671d"} Jan 22 06:17:21 crc kubenswrapper[4814]: I0122 06:17:21.454425 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-899ws" podStartSLOduration=2.946037871 podStartE2EDuration="6.453981741s" podCreationTimestamp="2026-01-22 06:17:15 +0000 UTC" firstStartedPulling="2026-01-22 06:17:17.38886891 +0000 UTC m=+3523.472357135" lastFinishedPulling="2026-01-22 06:17:20.89681279 +0000 UTC m=+3526.980301005" observedRunningTime="2026-01-22 06:17:21.445819617 +0000 UTC m=+3527.529307842" watchObservedRunningTime="2026-01-22 06:17:21.453981741 +0000 UTC m=+3527.537469966" Jan 22 06:17:25 crc kubenswrapper[4814]: I0122 06:17:25.733449 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:25 crc kubenswrapper[4814]: I0122 06:17:25.733991 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:25 crc kubenswrapper[4814]: I0122 06:17:25.795779 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:26 crc kubenswrapper[4814]: I0122 06:17:26.627473 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:26 crc kubenswrapper[4814]: I0122 06:17:26.701704 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-899ws"] Jan 22 06:17:28 crc kubenswrapper[4814]: I0122 06:17:28.472528 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-899ws" podUID="971233fd-cd3e-48d4-bd4e-48868a6438ee" containerName="registry-server" containerID="cri-o://7913552841b9820f4e39bd3b94e4d0c2f559bb19bfb1d8c25e9697cb8069671d" gracePeriod=2 Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.114498 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.217856 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/971233fd-cd3e-48d4-bd4e-48868a6438ee-catalog-content\") pod \"971233fd-cd3e-48d4-bd4e-48868a6438ee\" (UID: \"971233fd-cd3e-48d4-bd4e-48868a6438ee\") " Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.217954 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nsbn8\" (UniqueName: \"kubernetes.io/projected/971233fd-cd3e-48d4-bd4e-48868a6438ee-kube-api-access-nsbn8\") pod \"971233fd-cd3e-48d4-bd4e-48868a6438ee\" (UID: \"971233fd-cd3e-48d4-bd4e-48868a6438ee\") " Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.218180 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/971233fd-cd3e-48d4-bd4e-48868a6438ee-utilities\") pod \"971233fd-cd3e-48d4-bd4e-48868a6438ee\" (UID: \"971233fd-cd3e-48d4-bd4e-48868a6438ee\") " Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.219150 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/971233fd-cd3e-48d4-bd4e-48868a6438ee-utilities" (OuterVolumeSpecName: "utilities") pod "971233fd-cd3e-48d4-bd4e-48868a6438ee" (UID: "971233fd-cd3e-48d4-bd4e-48868a6438ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.237643 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/971233fd-cd3e-48d4-bd4e-48868a6438ee-kube-api-access-nsbn8" (OuterVolumeSpecName: "kube-api-access-nsbn8") pod "971233fd-cd3e-48d4-bd4e-48868a6438ee" (UID: "971233fd-cd3e-48d4-bd4e-48868a6438ee"). InnerVolumeSpecName "kube-api-access-nsbn8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.251696 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/971233fd-cd3e-48d4-bd4e-48868a6438ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "971233fd-cd3e-48d4-bd4e-48868a6438ee" (UID: "971233fd-cd3e-48d4-bd4e-48868a6438ee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.320552 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/971233fd-cd3e-48d4-bd4e-48868a6438ee-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.320835 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/971233fd-cd3e-48d4-bd4e-48868a6438ee-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.320908 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nsbn8\" (UniqueName: \"kubernetes.io/projected/971233fd-cd3e-48d4-bd4e-48868a6438ee-kube-api-access-nsbn8\") on node \"crc\" DevicePath \"\"" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.484253 4814 generic.go:334] "Generic (PLEG): container finished" podID="971233fd-cd3e-48d4-bd4e-48868a6438ee" containerID="7913552841b9820f4e39bd3b94e4d0c2f559bb19bfb1d8c25e9697cb8069671d" exitCode=0 Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.484331 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-899ws" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.484350 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-899ws" event={"ID":"971233fd-cd3e-48d4-bd4e-48868a6438ee","Type":"ContainerDied","Data":"7913552841b9820f4e39bd3b94e4d0c2f559bb19bfb1d8c25e9697cb8069671d"} Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.484680 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-899ws" event={"ID":"971233fd-cd3e-48d4-bd4e-48868a6438ee","Type":"ContainerDied","Data":"a2a2e1d1cdd97997836efd2a2b4a2e06e6c1edde781e68ea3822dbf797d6fb18"} Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.484713 4814 scope.go:117] "RemoveContainer" containerID="7913552841b9820f4e39bd3b94e4d0c2f559bb19bfb1d8c25e9697cb8069671d" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.518043 4814 scope.go:117] "RemoveContainer" containerID="d3260a76fa6ff4823dd39804c2c21a3645b150b66e84d05efa114a7649c0ed96" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.542046 4814 scope.go:117] "RemoveContainer" containerID="2210fb51596eb8020504231b4ca6f653f07eb6ab857c13e9d3d82720221748cf" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.542179 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-899ws"] Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.555231 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-899ws"] Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.601969 4814 scope.go:117] "RemoveContainer" containerID="7913552841b9820f4e39bd3b94e4d0c2f559bb19bfb1d8c25e9697cb8069671d" Jan 22 06:17:29 crc kubenswrapper[4814]: E0122 06:17:29.602815 4814 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7913552841b9820f4e39bd3b94e4d0c2f559bb19bfb1d8c25e9697cb8069671d\": container with ID starting with 7913552841b9820f4e39bd3b94e4d0c2f559bb19bfb1d8c25e9697cb8069671d not found: ID does not exist" containerID="7913552841b9820f4e39bd3b94e4d0c2f559bb19bfb1d8c25e9697cb8069671d" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.602861 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7913552841b9820f4e39bd3b94e4d0c2f559bb19bfb1d8c25e9697cb8069671d"} err="failed to get container status \"7913552841b9820f4e39bd3b94e4d0c2f559bb19bfb1d8c25e9697cb8069671d\": rpc error: code = NotFound desc = could not find container \"7913552841b9820f4e39bd3b94e4d0c2f559bb19bfb1d8c25e9697cb8069671d\": container with ID starting with 7913552841b9820f4e39bd3b94e4d0c2f559bb19bfb1d8c25e9697cb8069671d not found: ID does not exist" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.602885 4814 scope.go:117] "RemoveContainer" containerID="d3260a76fa6ff4823dd39804c2c21a3645b150b66e84d05efa114a7649c0ed96" Jan 22 06:17:29 crc kubenswrapper[4814]: E0122 06:17:29.605533 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3260a76fa6ff4823dd39804c2c21a3645b150b66e84d05efa114a7649c0ed96\": container with ID starting with d3260a76fa6ff4823dd39804c2c21a3645b150b66e84d05efa114a7649c0ed96 not found: ID does not exist" containerID="d3260a76fa6ff4823dd39804c2c21a3645b150b66e84d05efa114a7649c0ed96" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.605595 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3260a76fa6ff4823dd39804c2c21a3645b150b66e84d05efa114a7649c0ed96"} err="failed to get container status \"d3260a76fa6ff4823dd39804c2c21a3645b150b66e84d05efa114a7649c0ed96\": rpc error: code = NotFound desc = could not find container \"d3260a76fa6ff4823dd39804c2c21a3645b150b66e84d05efa114a7649c0ed96\": container with ID starting with d3260a76fa6ff4823dd39804c2c21a3645b150b66e84d05efa114a7649c0ed96 not found: ID does not exist" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.605652 4814 scope.go:117] "RemoveContainer" containerID="2210fb51596eb8020504231b4ca6f653f07eb6ab857c13e9d3d82720221748cf" Jan 22 06:17:29 crc kubenswrapper[4814]: E0122 06:17:29.606078 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2210fb51596eb8020504231b4ca6f653f07eb6ab857c13e9d3d82720221748cf\": container with ID starting with 2210fb51596eb8020504231b4ca6f653f07eb6ab857c13e9d3d82720221748cf not found: ID does not exist" containerID="2210fb51596eb8020504231b4ca6f653f07eb6ab857c13e9d3d82720221748cf" Jan 22 06:17:29 crc kubenswrapper[4814]: I0122 06:17:29.606108 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2210fb51596eb8020504231b4ca6f653f07eb6ab857c13e9d3d82720221748cf"} err="failed to get container status \"2210fb51596eb8020504231b4ca6f653f07eb6ab857c13e9d3d82720221748cf\": rpc error: code = NotFound desc = could not find container \"2210fb51596eb8020504231b4ca6f653f07eb6ab857c13e9d3d82720221748cf\": container with ID starting with 2210fb51596eb8020504231b4ca6f653f07eb6ab857c13e9d3d82720221748cf not found: ID does not exist" Jan 22 06:17:30 crc kubenswrapper[4814]: I0122 06:17:30.359892 4814 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="971233fd-cd3e-48d4-bd4e-48868a6438ee" path="/var/lib/kubelet/pods/971233fd-cd3e-48d4-bd4e-48868a6438ee/volumes" Jan 22 06:18:19 crc kubenswrapper[4814]: I0122 06:18:19.614493 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:18:19 crc kubenswrapper[4814]: I0122 06:18:19.615344 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:18:49 crc kubenswrapper[4814]: I0122 06:18:49.614592 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:18:49 crc kubenswrapper[4814]: I0122 06:18:49.615178 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:19:19 crc kubenswrapper[4814]: I0122 06:19:19.613756 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:19:19 crc kubenswrapper[4814]: I0122 06:19:19.614249 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:19:19 crc kubenswrapper[4814]: I0122 06:19:19.614297 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 06:19:19 crc kubenswrapper[4814]: I0122 06:19:19.614984 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:19:19 crc kubenswrapper[4814]: I0122 06:19:19.615035 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" gracePeriod=600 Jan 22 06:19:19 crc kubenswrapper[4814]: E0122 06:19:19.773987 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:19:20 crc kubenswrapper[4814]: I0122 06:19:20.601071 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" exitCode=0 Jan 22 06:19:20 crc kubenswrapper[4814]: I0122 06:19:20.601127 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1"} Jan 22 06:19:20 crc kubenswrapper[4814]: I0122 06:19:20.601187 4814 scope.go:117] "RemoveContainer" containerID="2a32758657b76610d37069f534a48f88945c31b2fb62507c6e1d7eb88b1dc72d" Jan 22 06:19:20 crc kubenswrapper[4814]: I0122 06:19:20.602897 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:19:20 crc kubenswrapper[4814]: E0122 06:19:20.604058 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:19:32 crc kubenswrapper[4814]: I0122 06:19:32.343791 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:19:32 crc kubenswrapper[4814]: E0122 06:19:32.344566 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:19:46 crc kubenswrapper[4814]: I0122 06:19:46.344156 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:19:46 crc kubenswrapper[4814]: E0122 06:19:46.345202 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:19:57 crc kubenswrapper[4814]: I0122 06:19:57.347148 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:19:57 crc kubenswrapper[4814]: E0122 06:19:57.348504 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:20:01 crc kubenswrapper[4814]: I0122 06:20:01.067580 4814 generic.go:334] "Generic (PLEG): container finished" podID="0aa6d92e-884d-41d0-a26e-3e5de31c05a3" containerID="445f73cae808bbd750a0b4628dcd89d656795206f547f3c8f0863b59ae637e69" exitCode=1 Jan 22 06:20:01 crc kubenswrapper[4814]: I0122 06:20:01.067682 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" event={"ID":"0aa6d92e-884d-41d0-a26e-3e5de31c05a3","Type":"ContainerDied","Data":"445f73cae808bbd750a0b4628dcd89d656795206f547f3c8f0863b59ae637e69"} Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.711675 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.763294 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-ca-certs\") pod \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.763376 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-ssh-key\") pod \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.763467 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-openstack-config\") pod \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.763528 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gghk8\" (UniqueName: \"kubernetes.io/projected/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-kube-api-access-gghk8\") pod \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.763603 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-config-data\") pod \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.763857 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-test-operator-ephemeral-workdir\") pod \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.764024 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-test-operator-ephemeral-temporary\") pod \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\" (UID: 
\"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.764184 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.764759 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-openstack-config-secret\") pod \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\" (UID: \"0aa6d92e-884d-41d0-a26e-3e5de31c05a3\") " Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.764852 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-config-data" (OuterVolumeSpecName: "config-data") pod "0aa6d92e-884d-41d0-a26e-3e5de31c05a3" (UID: "0aa6d92e-884d-41d0-a26e-3e5de31c05a3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.765421 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "0aa6d92e-884d-41d0-a26e-3e5de31c05a3" (UID: "0aa6d92e-884d-41d0-a26e-3e5de31c05a3"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.765791 4814 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.765809 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.771010 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "0aa6d92e-884d-41d0-a26e-3e5de31c05a3" (UID: "0aa6d92e-884d-41d0-a26e-3e5de31c05a3"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.775415 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-kube-api-access-gghk8" (OuterVolumeSpecName: "kube-api-access-gghk8") pod "0aa6d92e-884d-41d0-a26e-3e5de31c05a3" (UID: "0aa6d92e-884d-41d0-a26e-3e5de31c05a3"). InnerVolumeSpecName "kube-api-access-gghk8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.784885 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "test-operator-logs") pod "0aa6d92e-884d-41d0-a26e-3e5de31c05a3" (UID: "0aa6d92e-884d-41d0-a26e-3e5de31c05a3"). 
InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.839722 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest-s01-single-thread-testing"] Jan 22 06:20:02 crc kubenswrapper[4814]: E0122 06:20:02.840429 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="971233fd-cd3e-48d4-bd4e-48868a6438ee" containerName="registry-server" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.840456 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="971233fd-cd3e-48d4-bd4e-48868a6438ee" containerName="registry-server" Jan 22 06:20:02 crc kubenswrapper[4814]: E0122 06:20:02.840477 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="971233fd-cd3e-48d4-bd4e-48868a6438ee" containerName="extract-content" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.840485 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="971233fd-cd3e-48d4-bd4e-48868a6438ee" containerName="extract-content" Jan 22 06:20:02 crc kubenswrapper[4814]: E0122 06:20:02.840501 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="971233fd-cd3e-48d4-bd4e-48868a6438ee" containerName="extract-utilities" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.840509 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="971233fd-cd3e-48d4-bd4e-48868a6438ee" containerName="extract-utilities" Jan 22 06:20:02 crc kubenswrapper[4814]: E0122 06:20:02.840688 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0aa6d92e-884d-41d0-a26e-3e5de31c05a3" containerName="tempest-tests-tempest-tests-runner" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.840702 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="0aa6d92e-884d-41d0-a26e-3e5de31c05a3" containerName="tempest-tests-tempest-tests-runner" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.840935 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="971233fd-cd3e-48d4-bd4e-48868a6438ee" containerName="registry-server" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.840956 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="0aa6d92e-884d-41d0-a26e-3e5de31c05a3" containerName="tempest-tests-tempest-tests-runner" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.843158 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.860383 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "0aa6d92e-884d-41d0-a26e-3e5de31c05a3" (UID: "0aa6d92e-884d-41d0-a26e-3e5de31c05a3"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.861594 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s1" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.864179 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s1" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.867354 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-openstack-config\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.867424 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-openstack-config-secret\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.870451 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-config-data\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.870594 4814 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.870619 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gghk8\" (UniqueName: \"kubernetes.io/projected/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-kube-api-access-gghk8\") on node \"crc\" DevicePath \"\"" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.870652 4814 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.872778 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0aa6d92e-884d-41d0-a26e-3e5de31c05a3" (UID: "0aa6d92e-884d-41d0-a26e-3e5de31c05a3"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.878099 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s01-single-thread-testing"] Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.879708 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "0aa6d92e-884d-41d0-a26e-3e5de31c05a3" (UID: "0aa6d92e-884d-41d0-a26e-3e5de31c05a3"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.884743 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "0aa6d92e-884d-41d0-a26e-3e5de31c05a3" (UID: "0aa6d92e-884d-41d0-a26e-3e5de31c05a3"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.972373 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-ca-certs\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.972424 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlpr5\" (UniqueName: \"kubernetes.io/projected/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-kube-api-access-nlpr5\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.972468 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.972492 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.972507 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-ssh-key\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.973572 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.975775 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-config-data\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.976121 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-openstack-config\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.976266 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-openstack-config-secret\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.976459 4814 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.976488 4814 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-ca-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.976508 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0aa6d92e-884d-41d0-a26e-3e5de31c05a3-ssh-key\") on node \"crc\" DevicePath \"\"" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.977020 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-config-data\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.977251 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-openstack-config\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:02 crc kubenswrapper[4814]: I0122 06:20:02.980386 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-openstack-config-secret\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " 
pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.012425 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.078179 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-ca-certs\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.078225 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlpr5\" (UniqueName: \"kubernetes.io/projected/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-kube-api-access-nlpr5\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.078262 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.078286 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.078326 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-ssh-key\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.080320 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.081063 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " 
pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.085816 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-ssh-key\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.085872 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-ca-certs\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.091799 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" event={"ID":"0aa6d92e-884d-41d0-a26e-3e5de31c05a3","Type":"ContainerDied","Data":"33912a11c5e30aaf2d689fd70de9a11a4507145c1935b81dc56a21a43dc73cd0"} Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.091861 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s00-multi-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.091837 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="33912a11c5e30aaf2d689fd70de9a11a4507145c1935b81dc56a21a43dc73cd0" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.095191 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlpr5\" (UniqueName: \"kubernetes.io/projected/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-kube-api-access-nlpr5\") pod \"tempest-tests-tempest-s01-single-thread-testing\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.265351 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 06:20:03 crc kubenswrapper[4814]: I0122 06:20:03.895842 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest-s01-single-thread-testing"] Jan 22 06:20:04 crc kubenswrapper[4814]: I0122 06:20:04.105341 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" event={"ID":"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533","Type":"ContainerStarted","Data":"db33fa3ccddd154da3b71b97f35b6014a1517065a157fa3151ffe8bb63ee4963"} Jan 22 06:20:08 crc kubenswrapper[4814]: I0122 06:20:08.141613 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" event={"ID":"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533","Type":"ContainerStarted","Data":"d3e3c51083cd7432a94f444b017b7b1bdf1c73bc695f74cce6fc779e70a1e335"} Jan 22 06:20:08 crc kubenswrapper[4814]: I0122 06:20:08.175294 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" podStartSLOduration=6.175270254 podStartE2EDuration="6.175270254s" podCreationTimestamp="2026-01-22 06:20:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:20:08.163195482 +0000 UTC m=+3694.246683707" watchObservedRunningTime="2026-01-22 06:20:08.175270254 +0000 UTC m=+3694.258758489" Jan 22 06:20:11 crc kubenswrapper[4814]: I0122 06:20:11.344255 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:20:11 crc kubenswrapper[4814]: E0122 06:20:11.345015 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:20:22 crc kubenswrapper[4814]: I0122 06:20:22.354672 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:20:22 crc kubenswrapper[4814]: E0122 06:20:22.355707 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:20:34 crc kubenswrapper[4814]: I0122 06:20:34.351914 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:20:34 crc kubenswrapper[4814]: E0122 06:20:34.352785 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 
06:20:45 crc kubenswrapper[4814]: I0122 06:20:45.343806 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:20:45 crc kubenswrapper[4814]: E0122 06:20:45.344527 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:20:57 crc kubenswrapper[4814]: I0122 06:20:57.343252 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:20:57 crc kubenswrapper[4814]: E0122 06:20:57.344131 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:21:00 crc kubenswrapper[4814]: I0122 06:21:00.924281 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5cf7d7889-27mtt"] Jan 22 06:21:00 crc kubenswrapper[4814]: I0122 06:21:00.926882 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:00 crc kubenswrapper[4814]: I0122 06:21:00.966300 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5cf7d7889-27mtt"] Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.011514 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-combined-ca-bundle\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.011584 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-ovndb-tls-certs\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.011614 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-public-tls-certs\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.011692 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-httpd-config\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.011754 4814 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-config\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.011796 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vd8rt\" (UniqueName: \"kubernetes.io/projected/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-kube-api-access-vd8rt\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.011818 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-internal-tls-certs\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.116695 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-config\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.116807 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vd8rt\" (UniqueName: \"kubernetes.io/projected/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-kube-api-access-vd8rt\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.116838 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-internal-tls-certs\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.116998 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-combined-ca-bundle\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.117107 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-ovndb-tls-certs\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.117143 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-public-tls-certs\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.117203 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-httpd-config\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.136492 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-httpd-config\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.145496 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-internal-tls-certs\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.153669 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vd8rt\" (UniqueName: \"kubernetes.io/projected/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-kube-api-access-vd8rt\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.156589 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-config\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.156609 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-combined-ca-bundle\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.157796 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-ovndb-tls-certs\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.159199 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-public-tls-certs\") pod \"neutron-5cf7d7889-27mtt\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.247990 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:01 crc kubenswrapper[4814]: I0122 06:21:01.833890 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5cf7d7889-27mtt"] Jan 22 06:21:02 crc kubenswrapper[4814]: I0122 06:21:02.709865 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5cf7d7889-27mtt" event={"ID":"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a","Type":"ContainerStarted","Data":"b5ab4f248565a32892ccfb1552a875f4e3e004a7648cf25d1f798f477759e22b"} Jan 22 06:21:02 crc kubenswrapper[4814]: I0122 06:21:02.710318 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:02 crc kubenswrapper[4814]: I0122 06:21:02.710338 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5cf7d7889-27mtt" event={"ID":"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a","Type":"ContainerStarted","Data":"9370c2f5eb9fb90e56ce8dfc3b4ef685be64651243a04e857cc9d387515ebbfe"} Jan 22 06:21:02 crc kubenswrapper[4814]: I0122 06:21:02.710356 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5cf7d7889-27mtt" event={"ID":"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a","Type":"ContainerStarted","Data":"4504c8e8fae62d0aa1ce406223ed6337600ebb65e15ce93a23c888d83c826826"} Jan 22 06:21:08 crc kubenswrapper[4814]: I0122 06:21:08.344711 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:21:08 crc kubenswrapper[4814]: E0122 06:21:08.347439 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:21:22 crc kubenswrapper[4814]: I0122 06:21:22.344165 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:21:22 crc kubenswrapper[4814]: E0122 06:21:22.344927 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:21:31 crc kubenswrapper[4814]: I0122 06:21:31.274450 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:21:31 crc kubenswrapper[4814]: I0122 06:21:31.315268 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5cf7d7889-27mtt" podStartSLOduration=31.315238332 podStartE2EDuration="31.315238332s" podCreationTimestamp="2026-01-22 06:21:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:21:02.739836235 +0000 UTC m=+3748.823324460" watchObservedRunningTime="2026-01-22 06:21:31.315238332 +0000 UTC m=+3777.398726557" Jan 22 06:21:31 crc kubenswrapper[4814]: I0122 06:21:31.371946 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/neutron-58687b7457-jl86n"] Jan 22 06:21:31 crc kubenswrapper[4814]: I0122 06:21:31.372231 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-58687b7457-jl86n" podUID="722c584f-63e7-4817-b5f3-14915fbfe930" containerName="neutron-api" containerID="cri-o://7214d2a1f39761dae623f6a7f57f7eca2494819d5044c4b0b8161b52304916b9" gracePeriod=30 Jan 22 06:21:31 crc kubenswrapper[4814]: I0122 06:21:31.372377 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-58687b7457-jl86n" podUID="722c584f-63e7-4817-b5f3-14915fbfe930" containerName="neutron-httpd" containerID="cri-o://703cc6d1048b43a347cc8099f5d47bb1aebdc40d5295fbc80d9678894ecc3558" gracePeriod=30 Jan 22 06:21:31 crc kubenswrapper[4814]: I0122 06:21:31.991137 4814 generic.go:334] "Generic (PLEG): container finished" podID="722c584f-63e7-4817-b5f3-14915fbfe930" containerID="703cc6d1048b43a347cc8099f5d47bb1aebdc40d5295fbc80d9678894ecc3558" exitCode=0 Jan 22 06:21:31 crc kubenswrapper[4814]: I0122 06:21:31.991176 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58687b7457-jl86n" event={"ID":"722c584f-63e7-4817-b5f3-14915fbfe930","Type":"ContainerDied","Data":"703cc6d1048b43a347cc8099f5d47bb1aebdc40d5295fbc80d9678894ecc3558"} Jan 22 06:21:36 crc kubenswrapper[4814]: I0122 06:21:36.344114 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:21:36 crc kubenswrapper[4814]: E0122 06:21:36.345034 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:21:38 crc kubenswrapper[4814]: I0122 06:21:38.119220 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-58687b7457-jl86n" podUID="722c584f-63e7-4817-b5f3-14915fbfe930" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.169:9696/\": dial tcp 10.217.0.169:9696: connect: connection refused" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.156573 4814 generic.go:334] "Generic (PLEG): container finished" podID="722c584f-63e7-4817-b5f3-14915fbfe930" containerID="7214d2a1f39761dae623f6a7f57f7eca2494819d5044c4b0b8161b52304916b9" exitCode=0 Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.156759 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58687b7457-jl86n" event={"ID":"722c584f-63e7-4817-b5f3-14915fbfe930","Type":"ContainerDied","Data":"7214d2a1f39761dae623f6a7f57f7eca2494819d5044c4b0b8161b52304916b9"} Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.241907 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-58687b7457-jl86n" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.313961 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-httpd-config\") pod \"722c584f-63e7-4817-b5f3-14915fbfe930\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.314025 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-public-tls-certs\") pod \"722c584f-63e7-4817-b5f3-14915fbfe930\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.314190 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-internal-tls-certs\") pod \"722c584f-63e7-4817-b5f3-14915fbfe930\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.314220 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-combined-ca-bundle\") pod \"722c584f-63e7-4817-b5f3-14915fbfe930\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.314317 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2pqn\" (UniqueName: \"kubernetes.io/projected/722c584f-63e7-4817-b5f3-14915fbfe930-kube-api-access-m2pqn\") pod \"722c584f-63e7-4817-b5f3-14915fbfe930\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.314379 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-ovndb-tls-certs\") pod \"722c584f-63e7-4817-b5f3-14915fbfe930\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.314423 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-config\") pod \"722c584f-63e7-4817-b5f3-14915fbfe930\" (UID: \"722c584f-63e7-4817-b5f3-14915fbfe930\") " Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.321224 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/722c584f-63e7-4817-b5f3-14915fbfe930-kube-api-access-m2pqn" (OuterVolumeSpecName: "kube-api-access-m2pqn") pod "722c584f-63e7-4817-b5f3-14915fbfe930" (UID: "722c584f-63e7-4817-b5f3-14915fbfe930"). InnerVolumeSpecName "kube-api-access-m2pqn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.321681 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "722c584f-63e7-4817-b5f3-14915fbfe930" (UID: "722c584f-63e7-4817-b5f3-14915fbfe930"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.401579 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "722c584f-63e7-4817-b5f3-14915fbfe930" (UID: "722c584f-63e7-4817-b5f3-14915fbfe930"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.403290 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "722c584f-63e7-4817-b5f3-14915fbfe930" (UID: "722c584f-63e7-4817-b5f3-14915fbfe930"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.408767 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "722c584f-63e7-4817-b5f3-14915fbfe930" (UID: "722c584f-63e7-4817-b5f3-14915fbfe930"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.411777 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-config" (OuterVolumeSpecName: "config") pod "722c584f-63e7-4817-b5f3-14915fbfe930" (UID: "722c584f-63e7-4817-b5f3-14915fbfe930"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.417206 4814 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.417234 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.417244 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2pqn\" (UniqueName: \"kubernetes.io/projected/722c584f-63e7-4817-b5f3-14915fbfe930-kube-api-access-m2pqn\") on node \"crc\" DevicePath \"\"" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.417255 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.417263 4814 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.417271 4814 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.427281 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "722c584f-63e7-4817-b5f3-14915fbfe930" (UID: "722c584f-63e7-4817-b5f3-14915fbfe930"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:21:47 crc kubenswrapper[4814]: I0122 06:21:47.518891 4814 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/722c584f-63e7-4817-b5f3-14915fbfe930-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:21:48 crc kubenswrapper[4814]: I0122 06:21:48.171576 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58687b7457-jl86n" event={"ID":"722c584f-63e7-4817-b5f3-14915fbfe930","Type":"ContainerDied","Data":"8515a74d2b69e32d70f3b779024b35117577ecec86860430f0eb2a9d97394e86"} Jan 22 06:21:48 crc kubenswrapper[4814]: I0122 06:21:48.171673 4814 scope.go:117] "RemoveContainer" containerID="703cc6d1048b43a347cc8099f5d47bb1aebdc40d5295fbc80d9678894ecc3558" Jan 22 06:21:48 crc kubenswrapper[4814]: I0122 06:21:48.171700 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-58687b7457-jl86n" Jan 22 06:21:48 crc kubenswrapper[4814]: I0122 06:21:48.222894 4814 scope.go:117] "RemoveContainer" containerID="7214d2a1f39761dae623f6a7f57f7eca2494819d5044c4b0b8161b52304916b9" Jan 22 06:21:48 crc kubenswrapper[4814]: I0122 06:21:48.229703 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-58687b7457-jl86n"] Jan 22 06:21:48 crc kubenswrapper[4814]: I0122 06:21:48.246676 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-58687b7457-jl86n"] Jan 22 06:21:48 crc kubenswrapper[4814]: I0122 06:21:48.343407 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:21:48 crc kubenswrapper[4814]: E0122 06:21:48.343696 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:21:48 crc kubenswrapper[4814]: I0122 06:21:48.361057 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="722c584f-63e7-4817-b5f3-14915fbfe930" path="/var/lib/kubelet/pods/722c584f-63e7-4817-b5f3-14915fbfe930/volumes" Jan 22 06:22:00 crc kubenswrapper[4814]: I0122 06:22:00.343864 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:22:00 crc kubenswrapper[4814]: E0122 06:22:00.344458 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:22:13 crc kubenswrapper[4814]: I0122 06:22:13.344421 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:22:13 crc kubenswrapper[4814]: 
E0122 06:22:13.345447 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:22:27 crc kubenswrapper[4814]: I0122 06:22:27.344561 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:22:27 crc kubenswrapper[4814]: E0122 06:22:27.346598 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:22:42 crc kubenswrapper[4814]: I0122 06:22:42.358259 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:22:42 crc kubenswrapper[4814]: E0122 06:22:42.362384 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:22:55 crc kubenswrapper[4814]: I0122 06:22:55.343546 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:22:55 crc kubenswrapper[4814]: E0122 06:22:55.344439 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:23:10 crc kubenswrapper[4814]: I0122 06:23:10.344751 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:23:10 crc kubenswrapper[4814]: E0122 06:23:10.345358 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:23:22 crc kubenswrapper[4814]: I0122 06:23:22.344143 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:23:22 crc kubenswrapper[4814]: E0122 06:23:22.344966 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:23:35 crc kubenswrapper[4814]: I0122 06:23:35.343974 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:23:35 crc kubenswrapper[4814]: E0122 06:23:35.344979 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:23:46 crc kubenswrapper[4814]: I0122 06:23:46.344106 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:23:46 crc kubenswrapper[4814]: E0122 06:23:46.344920 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:24:00 crc kubenswrapper[4814]: I0122 06:24:00.343485 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:24:00 crc kubenswrapper[4814]: E0122 06:24:00.344736 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:24:13 crc kubenswrapper[4814]: I0122 06:24:13.343715 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:24:13 crc kubenswrapper[4814]: E0122 06:24:13.345286 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:24:27 crc kubenswrapper[4814]: I0122 06:24:27.343580 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:24:27 crc kubenswrapper[4814]: I0122 06:24:27.884194 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"fbd583bbb1b911fef503ea01e1ff3718f0a5c2f03ad3f36dfb8c2b61334ec8ef"} Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.142127 4814 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b5bwh"] Jan 22 06:25:06 crc kubenswrapper[4814]: E0122 06:25:06.143695 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="722c584f-63e7-4817-b5f3-14915fbfe930" containerName="neutron-httpd" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.143739 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="722c584f-63e7-4817-b5f3-14915fbfe930" containerName="neutron-httpd" Jan 22 06:25:06 crc kubenswrapper[4814]: E0122 06:25:06.143798 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="722c584f-63e7-4817-b5f3-14915fbfe930" containerName="neutron-api" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.143819 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="722c584f-63e7-4817-b5f3-14915fbfe930" containerName="neutron-api" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.144194 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="722c584f-63e7-4817-b5f3-14915fbfe930" containerName="neutron-api" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.144239 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="722c584f-63e7-4817-b5f3-14915fbfe930" containerName="neutron-httpd" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.147765 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.157783 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b5bwh"] Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.292881 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfnv4\" (UniqueName: \"kubernetes.io/projected/1173f099-5efb-4f51-81fe-53be3eb6cd95-kube-api-access-qfnv4\") pod \"redhat-operators-b5bwh\" (UID: \"1173f099-5efb-4f51-81fe-53be3eb6cd95\") " pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.293170 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1173f099-5efb-4f51-81fe-53be3eb6cd95-catalog-content\") pod \"redhat-operators-b5bwh\" (UID: \"1173f099-5efb-4f51-81fe-53be3eb6cd95\") " pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.293230 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1173f099-5efb-4f51-81fe-53be3eb6cd95-utilities\") pod \"redhat-operators-b5bwh\" (UID: \"1173f099-5efb-4f51-81fe-53be3eb6cd95\") " pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.395392 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfnv4\" (UniqueName: \"kubernetes.io/projected/1173f099-5efb-4f51-81fe-53be3eb6cd95-kube-api-access-qfnv4\") pod \"redhat-operators-b5bwh\" (UID: \"1173f099-5efb-4f51-81fe-53be3eb6cd95\") " pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.395821 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1173f099-5efb-4f51-81fe-53be3eb6cd95-catalog-content\") pod \"redhat-operators-b5bwh\" (UID: 
\"1173f099-5efb-4f51-81fe-53be3eb6cd95\") " pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.395987 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1173f099-5efb-4f51-81fe-53be3eb6cd95-utilities\") pod \"redhat-operators-b5bwh\" (UID: \"1173f099-5efb-4f51-81fe-53be3eb6cd95\") " pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.396531 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1173f099-5efb-4f51-81fe-53be3eb6cd95-catalog-content\") pod \"redhat-operators-b5bwh\" (UID: \"1173f099-5efb-4f51-81fe-53be3eb6cd95\") " pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.396819 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1173f099-5efb-4f51-81fe-53be3eb6cd95-utilities\") pod \"redhat-operators-b5bwh\" (UID: \"1173f099-5efb-4f51-81fe-53be3eb6cd95\") " pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.418734 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfnv4\" (UniqueName: \"kubernetes.io/projected/1173f099-5efb-4f51-81fe-53be3eb6cd95-kube-api-access-qfnv4\") pod \"redhat-operators-b5bwh\" (UID: \"1173f099-5efb-4f51-81fe-53be3eb6cd95\") " pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:06 crc kubenswrapper[4814]: I0122 06:25:06.479287 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:07 crc kubenswrapper[4814]: I0122 06:25:07.457104 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b5bwh"] Jan 22 06:25:07 crc kubenswrapper[4814]: I0122 06:25:07.477333 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5bwh" event={"ID":"1173f099-5efb-4f51-81fe-53be3eb6cd95","Type":"ContainerStarted","Data":"a1ecd3534e786ceafd1b9342b77f2bac72b656e6e5c2a760a52d9740ebd044ee"} Jan 22 06:25:08 crc kubenswrapper[4814]: I0122 06:25:08.488979 4814 generic.go:334] "Generic (PLEG): container finished" podID="1173f099-5efb-4f51-81fe-53be3eb6cd95" containerID="11f90dd31fb434167b2434438306ab4586b6bb980f11e0fc1f08f4c33b88aea6" exitCode=0 Jan 22 06:25:08 crc kubenswrapper[4814]: I0122 06:25:08.489192 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5bwh" event={"ID":"1173f099-5efb-4f51-81fe-53be3eb6cd95","Type":"ContainerDied","Data":"11f90dd31fb434167b2434438306ab4586b6bb980f11e0fc1f08f4c33b88aea6"} Jan 22 06:25:08 crc kubenswrapper[4814]: I0122 06:25:08.491424 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 06:25:10 crc kubenswrapper[4814]: I0122 06:25:10.517162 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5bwh" event={"ID":"1173f099-5efb-4f51-81fe-53be3eb6cd95","Type":"ContainerStarted","Data":"8067790fc7b4a2d15499a7b54f4a07f939aa8052e50954bad2a3b18124384d7c"} Jan 22 06:25:15 crc kubenswrapper[4814]: I0122 06:25:15.576602 4814 generic.go:334] "Generic (PLEG): container finished" podID="1173f099-5efb-4f51-81fe-53be3eb6cd95" 
containerID="8067790fc7b4a2d15499a7b54f4a07f939aa8052e50954bad2a3b18124384d7c" exitCode=0 Jan 22 06:25:15 crc kubenswrapper[4814]: I0122 06:25:15.576685 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5bwh" event={"ID":"1173f099-5efb-4f51-81fe-53be3eb6cd95","Type":"ContainerDied","Data":"8067790fc7b4a2d15499a7b54f4a07f939aa8052e50954bad2a3b18124384d7c"} Jan 22 06:25:16 crc kubenswrapper[4814]: I0122 06:25:16.590166 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5bwh" event={"ID":"1173f099-5efb-4f51-81fe-53be3eb6cd95","Type":"ContainerStarted","Data":"1604c4f21af19d11f1e7e6730df40fb698a2ade5d1364bf16a69c21a437b04d9"} Jan 22 06:25:16 crc kubenswrapper[4814]: I0122 06:25:16.621744 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b5bwh" podStartSLOduration=3.124851146 podStartE2EDuration="10.621710467s" podCreationTimestamp="2026-01-22 06:25:06 +0000 UTC" firstStartedPulling="2026-01-22 06:25:08.49086485 +0000 UTC m=+3994.574353075" lastFinishedPulling="2026-01-22 06:25:15.987724181 +0000 UTC m=+4002.071212396" observedRunningTime="2026-01-22 06:25:16.61822871 +0000 UTC m=+4002.701716935" watchObservedRunningTime="2026-01-22 06:25:16.621710467 +0000 UTC m=+4002.705198762" Jan 22 06:25:26 crc kubenswrapper[4814]: I0122 06:25:26.479813 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:26 crc kubenswrapper[4814]: I0122 06:25:26.482816 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:27 crc kubenswrapper[4814]: I0122 06:25:27.550074 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b5bwh" podUID="1173f099-5efb-4f51-81fe-53be3eb6cd95" containerName="registry-server" probeResult="failure" output=< Jan 22 06:25:27 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 06:25:27 crc kubenswrapper[4814]: > Jan 22 06:25:36 crc kubenswrapper[4814]: I0122 06:25:36.533747 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:36 crc kubenswrapper[4814]: I0122 06:25:36.620188 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:37 crc kubenswrapper[4814]: I0122 06:25:37.341725 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b5bwh"] Jan 22 06:25:38 crc kubenswrapper[4814]: I0122 06:25:38.164361 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-b5bwh" podUID="1173f099-5efb-4f51-81fe-53be3eb6cd95" containerName="registry-server" containerID="cri-o://1604c4f21af19d11f1e7e6730df40fb698a2ade5d1364bf16a69c21a437b04d9" gracePeriod=2 Jan 22 06:25:38 crc kubenswrapper[4814]: I0122 06:25:38.855813 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:38 crc kubenswrapper[4814]: I0122 06:25:38.997525 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfnv4\" (UniqueName: \"kubernetes.io/projected/1173f099-5efb-4f51-81fe-53be3eb6cd95-kube-api-access-qfnv4\") pod \"1173f099-5efb-4f51-81fe-53be3eb6cd95\" (UID: \"1173f099-5efb-4f51-81fe-53be3eb6cd95\") " Jan 22 06:25:38 crc kubenswrapper[4814]: I0122 06:25:38.997568 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1173f099-5efb-4f51-81fe-53be3eb6cd95-catalog-content\") pod \"1173f099-5efb-4f51-81fe-53be3eb6cd95\" (UID: \"1173f099-5efb-4f51-81fe-53be3eb6cd95\") " Jan 22 06:25:38 crc kubenswrapper[4814]: I0122 06:25:38.997866 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1173f099-5efb-4f51-81fe-53be3eb6cd95-utilities\") pod \"1173f099-5efb-4f51-81fe-53be3eb6cd95\" (UID: \"1173f099-5efb-4f51-81fe-53be3eb6cd95\") " Jan 22 06:25:38 crc kubenswrapper[4814]: I0122 06:25:38.998844 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1173f099-5efb-4f51-81fe-53be3eb6cd95-utilities" (OuterVolumeSpecName: "utilities") pod "1173f099-5efb-4f51-81fe-53be3eb6cd95" (UID: "1173f099-5efb-4f51-81fe-53be3eb6cd95"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.005832 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1173f099-5efb-4f51-81fe-53be3eb6cd95-kube-api-access-qfnv4" (OuterVolumeSpecName: "kube-api-access-qfnv4") pod "1173f099-5efb-4f51-81fe-53be3eb6cd95" (UID: "1173f099-5efb-4f51-81fe-53be3eb6cd95"). InnerVolumeSpecName "kube-api-access-qfnv4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.100464 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1173f099-5efb-4f51-81fe-53be3eb6cd95-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.100507 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfnv4\" (UniqueName: \"kubernetes.io/projected/1173f099-5efb-4f51-81fe-53be3eb6cd95-kube-api-access-qfnv4\") on node \"crc\" DevicePath \"\"" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.143386 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1173f099-5efb-4f51-81fe-53be3eb6cd95-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1173f099-5efb-4f51-81fe-53be3eb6cd95" (UID: "1173f099-5efb-4f51-81fe-53be3eb6cd95"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.176885 4814 generic.go:334] "Generic (PLEG): container finished" podID="1173f099-5efb-4f51-81fe-53be3eb6cd95" containerID="1604c4f21af19d11f1e7e6730df40fb698a2ade5d1364bf16a69c21a437b04d9" exitCode=0 Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.176947 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5bwh" event={"ID":"1173f099-5efb-4f51-81fe-53be3eb6cd95","Type":"ContainerDied","Data":"1604c4f21af19d11f1e7e6730df40fb698a2ade5d1364bf16a69c21a437b04d9"} Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.176983 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5bwh" event={"ID":"1173f099-5efb-4f51-81fe-53be3eb6cd95","Type":"ContainerDied","Data":"a1ecd3534e786ceafd1b9342b77f2bac72b656e6e5c2a760a52d9740ebd044ee"} Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.177013 4814 scope.go:117] "RemoveContainer" containerID="1604c4f21af19d11f1e7e6730df40fb698a2ade5d1364bf16a69c21a437b04d9" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.177192 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5bwh" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.199555 4814 scope.go:117] "RemoveContainer" containerID="8067790fc7b4a2d15499a7b54f4a07f939aa8052e50954bad2a3b18124384d7c" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.203365 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1173f099-5efb-4f51-81fe-53be3eb6cd95-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.228159 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b5bwh"] Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.244825 4814 scope.go:117] "RemoveContainer" containerID="11f90dd31fb434167b2434438306ab4586b6bb980f11e0fc1f08f4c33b88aea6" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.254765 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-b5bwh"] Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.301823 4814 scope.go:117] "RemoveContainer" containerID="1604c4f21af19d11f1e7e6730df40fb698a2ade5d1364bf16a69c21a437b04d9" Jan 22 06:25:39 crc kubenswrapper[4814]: E0122 06:25:39.302242 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1604c4f21af19d11f1e7e6730df40fb698a2ade5d1364bf16a69c21a437b04d9\": container with ID starting with 1604c4f21af19d11f1e7e6730df40fb698a2ade5d1364bf16a69c21a437b04d9 not found: ID does not exist" containerID="1604c4f21af19d11f1e7e6730df40fb698a2ade5d1364bf16a69c21a437b04d9" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.302278 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1604c4f21af19d11f1e7e6730df40fb698a2ade5d1364bf16a69c21a437b04d9"} err="failed to get container status \"1604c4f21af19d11f1e7e6730df40fb698a2ade5d1364bf16a69c21a437b04d9\": rpc error: code = NotFound desc = could not find container \"1604c4f21af19d11f1e7e6730df40fb698a2ade5d1364bf16a69c21a437b04d9\": container with ID starting with 1604c4f21af19d11f1e7e6730df40fb698a2ade5d1364bf16a69c21a437b04d9 not found: ID does not exist" Jan 22 06:25:39 crc 
kubenswrapper[4814]: I0122 06:25:39.302304 4814 scope.go:117] "RemoveContainer" containerID="8067790fc7b4a2d15499a7b54f4a07f939aa8052e50954bad2a3b18124384d7c" Jan 22 06:25:39 crc kubenswrapper[4814]: E0122 06:25:39.302684 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8067790fc7b4a2d15499a7b54f4a07f939aa8052e50954bad2a3b18124384d7c\": container with ID starting with 8067790fc7b4a2d15499a7b54f4a07f939aa8052e50954bad2a3b18124384d7c not found: ID does not exist" containerID="8067790fc7b4a2d15499a7b54f4a07f939aa8052e50954bad2a3b18124384d7c" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.302712 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8067790fc7b4a2d15499a7b54f4a07f939aa8052e50954bad2a3b18124384d7c"} err="failed to get container status \"8067790fc7b4a2d15499a7b54f4a07f939aa8052e50954bad2a3b18124384d7c\": rpc error: code = NotFound desc = could not find container \"8067790fc7b4a2d15499a7b54f4a07f939aa8052e50954bad2a3b18124384d7c\": container with ID starting with 8067790fc7b4a2d15499a7b54f4a07f939aa8052e50954bad2a3b18124384d7c not found: ID does not exist" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.302728 4814 scope.go:117] "RemoveContainer" containerID="11f90dd31fb434167b2434438306ab4586b6bb980f11e0fc1f08f4c33b88aea6" Jan 22 06:25:39 crc kubenswrapper[4814]: E0122 06:25:39.303118 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11f90dd31fb434167b2434438306ab4586b6bb980f11e0fc1f08f4c33b88aea6\": container with ID starting with 11f90dd31fb434167b2434438306ab4586b6bb980f11e0fc1f08f4c33b88aea6 not found: ID does not exist" containerID="11f90dd31fb434167b2434438306ab4586b6bb980f11e0fc1f08f4c33b88aea6" Jan 22 06:25:39 crc kubenswrapper[4814]: I0122 06:25:39.303238 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11f90dd31fb434167b2434438306ab4586b6bb980f11e0fc1f08f4c33b88aea6"} err="failed to get container status \"11f90dd31fb434167b2434438306ab4586b6bb980f11e0fc1f08f4c33b88aea6\": rpc error: code = NotFound desc = could not find container \"11f90dd31fb434167b2434438306ab4586b6bb980f11e0fc1f08f4c33b88aea6\": container with ID starting with 11f90dd31fb434167b2434438306ab4586b6bb980f11e0fc1f08f4c33b88aea6 not found: ID does not exist" Jan 22 06:25:40 crc kubenswrapper[4814]: I0122 06:25:40.359621 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1173f099-5efb-4f51-81fe-53be3eb6cd95" path="/var/lib/kubelet/pods/1173f099-5efb-4f51-81fe-53be3eb6cd95/volumes" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.086746 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vc7lg"] Jan 22 06:26:03 crc kubenswrapper[4814]: E0122 06:26:03.087849 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1173f099-5efb-4f51-81fe-53be3eb6cd95" containerName="registry-server" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.087864 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="1173f099-5efb-4f51-81fe-53be3eb6cd95" containerName="registry-server" Jan 22 06:26:03 crc kubenswrapper[4814]: E0122 06:26:03.087890 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1173f099-5efb-4f51-81fe-53be3eb6cd95" containerName="extract-content" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.087898 4814 
state_mem.go:107] "Deleted CPUSet assignment" podUID="1173f099-5efb-4f51-81fe-53be3eb6cd95" containerName="extract-content" Jan 22 06:26:03 crc kubenswrapper[4814]: E0122 06:26:03.087910 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1173f099-5efb-4f51-81fe-53be3eb6cd95" containerName="extract-utilities" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.087916 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="1173f099-5efb-4f51-81fe-53be3eb6cd95" containerName="extract-utilities" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.088106 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="1173f099-5efb-4f51-81fe-53be3eb6cd95" containerName="registry-server" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.089441 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.125324 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vc7lg"] Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.224839 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msw4d\" (UniqueName: \"kubernetes.io/projected/bf8cade3-ac9f-4b23-8592-12437480b4d2-kube-api-access-msw4d\") pod \"community-operators-vc7lg\" (UID: \"bf8cade3-ac9f-4b23-8592-12437480b4d2\") " pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.224960 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf8cade3-ac9f-4b23-8592-12437480b4d2-catalog-content\") pod \"community-operators-vc7lg\" (UID: \"bf8cade3-ac9f-4b23-8592-12437480b4d2\") " pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.225020 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf8cade3-ac9f-4b23-8592-12437480b4d2-utilities\") pod \"community-operators-vc7lg\" (UID: \"bf8cade3-ac9f-4b23-8592-12437480b4d2\") " pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.326646 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf8cade3-ac9f-4b23-8592-12437480b4d2-catalog-content\") pod \"community-operators-vc7lg\" (UID: \"bf8cade3-ac9f-4b23-8592-12437480b4d2\") " pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.326764 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf8cade3-ac9f-4b23-8592-12437480b4d2-utilities\") pod \"community-operators-vc7lg\" (UID: \"bf8cade3-ac9f-4b23-8592-12437480b4d2\") " pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.326861 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msw4d\" (UniqueName: \"kubernetes.io/projected/bf8cade3-ac9f-4b23-8592-12437480b4d2-kube-api-access-msw4d\") pod \"community-operators-vc7lg\" (UID: \"bf8cade3-ac9f-4b23-8592-12437480b4d2\") " pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:03 crc 
kubenswrapper[4814]: I0122 06:26:03.327373 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf8cade3-ac9f-4b23-8592-12437480b4d2-catalog-content\") pod \"community-operators-vc7lg\" (UID: \"bf8cade3-ac9f-4b23-8592-12437480b4d2\") " pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.327396 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf8cade3-ac9f-4b23-8592-12437480b4d2-utilities\") pod \"community-operators-vc7lg\" (UID: \"bf8cade3-ac9f-4b23-8592-12437480b4d2\") " pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.687822 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9zmz7"] Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.690925 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.713226 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9zmz7"] Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.776864 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msw4d\" (UniqueName: \"kubernetes.io/projected/bf8cade3-ac9f-4b23-8592-12437480b4d2-kube-api-access-msw4d\") pod \"community-operators-vc7lg\" (UID: \"bf8cade3-ac9f-4b23-8592-12437480b4d2\") " pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.836264 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddaf9ca9-8934-47c3-913d-f6d95c010120-catalog-content\") pod \"redhat-marketplace-9zmz7\" (UID: \"ddaf9ca9-8934-47c3-913d-f6d95c010120\") " pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.836351 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddaf9ca9-8934-47c3-913d-f6d95c010120-utilities\") pod \"redhat-marketplace-9zmz7\" (UID: \"ddaf9ca9-8934-47c3-913d-f6d95c010120\") " pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.836376 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwfmd\" (UniqueName: \"kubernetes.io/projected/ddaf9ca9-8934-47c3-913d-f6d95c010120-kube-api-access-rwfmd\") pod \"redhat-marketplace-9zmz7\" (UID: \"ddaf9ca9-8934-47c3-913d-f6d95c010120\") " pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.938368 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddaf9ca9-8934-47c3-913d-f6d95c010120-catalog-content\") pod \"redhat-marketplace-9zmz7\" (UID: \"ddaf9ca9-8934-47c3-913d-f6d95c010120\") " pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.938430 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/ddaf9ca9-8934-47c3-913d-f6d95c010120-utilities\") pod \"redhat-marketplace-9zmz7\" (UID: \"ddaf9ca9-8934-47c3-913d-f6d95c010120\") " pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.938450 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwfmd\" (UniqueName: \"kubernetes.io/projected/ddaf9ca9-8934-47c3-913d-f6d95c010120-kube-api-access-rwfmd\") pod \"redhat-marketplace-9zmz7\" (UID: \"ddaf9ca9-8934-47c3-913d-f6d95c010120\") " pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.938833 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddaf9ca9-8934-47c3-913d-f6d95c010120-catalog-content\") pod \"redhat-marketplace-9zmz7\" (UID: \"ddaf9ca9-8934-47c3-913d-f6d95c010120\") " pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.938890 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddaf9ca9-8934-47c3-913d-f6d95c010120-utilities\") pod \"redhat-marketplace-9zmz7\" (UID: \"ddaf9ca9-8934-47c3-913d-f6d95c010120\") " pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:03 crc kubenswrapper[4814]: I0122 06:26:03.957916 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwfmd\" (UniqueName: \"kubernetes.io/projected/ddaf9ca9-8934-47c3-913d-f6d95c010120-kube-api-access-rwfmd\") pod \"redhat-marketplace-9zmz7\" (UID: \"ddaf9ca9-8934-47c3-913d-f6d95c010120\") " pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:04 crc kubenswrapper[4814]: I0122 06:26:04.020607 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:04 crc kubenswrapper[4814]: I0122 06:26:04.024028 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:04 crc kubenswrapper[4814]: I0122 06:26:04.674945 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vc7lg"] Jan 22 06:26:04 crc kubenswrapper[4814]: I0122 06:26:04.791917 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9zmz7"] Jan 22 06:26:05 crc kubenswrapper[4814]: W0122 06:26:05.078077 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podddaf9ca9_8934_47c3_913d_f6d95c010120.slice/crio-85a294c360aa315c11ebf65074de0bfd9402d191fca49a663b8c7dce58da4b43 WatchSource:0}: Error finding container 85a294c360aa315c11ebf65074de0bfd9402d191fca49a663b8c7dce58da4b43: Status 404 returned error can't find the container with id 85a294c360aa315c11ebf65074de0bfd9402d191fca49a663b8c7dce58da4b43 Jan 22 06:26:05 crc kubenswrapper[4814]: I0122 06:26:05.470041 4814 generic.go:334] "Generic (PLEG): container finished" podID="bf8cade3-ac9f-4b23-8592-12437480b4d2" containerID="2919e83a3e98102f34d7eda44eef0091bfd4ea23ec6b77f8282db469c2cd899a" exitCode=0 Jan 22 06:26:05 crc kubenswrapper[4814]: I0122 06:26:05.470389 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vc7lg" event={"ID":"bf8cade3-ac9f-4b23-8592-12437480b4d2","Type":"ContainerDied","Data":"2919e83a3e98102f34d7eda44eef0091bfd4ea23ec6b77f8282db469c2cd899a"} Jan 22 06:26:05 crc kubenswrapper[4814]: I0122 06:26:05.470415 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vc7lg" event={"ID":"bf8cade3-ac9f-4b23-8592-12437480b4d2","Type":"ContainerStarted","Data":"563fa14b1ca5abbaeb83c49b2e8427da28427a03440569f1a1c791a259a36537"} Jan 22 06:26:05 crc kubenswrapper[4814]: I0122 06:26:05.475453 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9zmz7" event={"ID":"ddaf9ca9-8934-47c3-913d-f6d95c010120","Type":"ContainerStarted","Data":"85a294c360aa315c11ebf65074de0bfd9402d191fca49a663b8c7dce58da4b43"} Jan 22 06:26:06 crc kubenswrapper[4814]: I0122 06:26:06.486089 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vc7lg" event={"ID":"bf8cade3-ac9f-4b23-8592-12437480b4d2","Type":"ContainerStarted","Data":"07171b68ae64353cf7d089655d923ef4f64979f094c74f752e6c9bd0719cafd1"} Jan 22 06:26:06 crc kubenswrapper[4814]: I0122 06:26:06.488356 4814 generic.go:334] "Generic (PLEG): container finished" podID="ddaf9ca9-8934-47c3-913d-f6d95c010120" containerID="4e9ea95cd32a21cc2a54aa7a834a8f9c573ed479892f8a1da6c27283d5d1eda3" exitCode=0 Jan 22 06:26:06 crc kubenswrapper[4814]: I0122 06:26:06.488407 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9zmz7" event={"ID":"ddaf9ca9-8934-47c3-913d-f6d95c010120","Type":"ContainerDied","Data":"4e9ea95cd32a21cc2a54aa7a834a8f9c573ed479892f8a1da6c27283d5d1eda3"} Jan 22 06:26:08 crc kubenswrapper[4814]: I0122 06:26:08.504535 4814 generic.go:334] "Generic (PLEG): container finished" podID="bf8cade3-ac9f-4b23-8592-12437480b4d2" containerID="07171b68ae64353cf7d089655d923ef4f64979f094c74f752e6c9bd0719cafd1" exitCode=0 Jan 22 06:26:08 crc kubenswrapper[4814]: I0122 06:26:08.504620 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vc7lg" 
event={"ID":"bf8cade3-ac9f-4b23-8592-12437480b4d2","Type":"ContainerDied","Data":"07171b68ae64353cf7d089655d923ef4f64979f094c74f752e6c9bd0719cafd1"} Jan 22 06:26:08 crc kubenswrapper[4814]: I0122 06:26:08.509744 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9zmz7" event={"ID":"ddaf9ca9-8934-47c3-913d-f6d95c010120","Type":"ContainerStarted","Data":"77b00802d66c01c584b19f427265e1e80a89d013a7f356e1d84358b900ed6ef0"} Jan 22 06:26:10 crc kubenswrapper[4814]: I0122 06:26:10.535238 4814 generic.go:334] "Generic (PLEG): container finished" podID="ddaf9ca9-8934-47c3-913d-f6d95c010120" containerID="77b00802d66c01c584b19f427265e1e80a89d013a7f356e1d84358b900ed6ef0" exitCode=0 Jan 22 06:26:10 crc kubenswrapper[4814]: I0122 06:26:10.535311 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9zmz7" event={"ID":"ddaf9ca9-8934-47c3-913d-f6d95c010120","Type":"ContainerDied","Data":"77b00802d66c01c584b19f427265e1e80a89d013a7f356e1d84358b900ed6ef0"} Jan 22 06:26:11 crc kubenswrapper[4814]: I0122 06:26:11.548293 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vc7lg" event={"ID":"bf8cade3-ac9f-4b23-8592-12437480b4d2","Type":"ContainerStarted","Data":"bf4fe6d911d761073d85bc3eb209b6792eb846f433e8f15a67d32cd754196e8e"} Jan 22 06:26:11 crc kubenswrapper[4814]: I0122 06:26:11.552101 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9zmz7" event={"ID":"ddaf9ca9-8934-47c3-913d-f6d95c010120","Type":"ContainerStarted","Data":"e4878d086d3e0c4bd852290fe758aef916f352b99bb233087204a04f9a50a334"} Jan 22 06:26:11 crc kubenswrapper[4814]: I0122 06:26:11.569160 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vc7lg" podStartSLOduration=3.735514434 podStartE2EDuration="8.569142937s" podCreationTimestamp="2026-01-22 06:26:03 +0000 UTC" firstStartedPulling="2026-01-22 06:26:05.472847456 +0000 UTC m=+4051.556335671" lastFinishedPulling="2026-01-22 06:26:10.306475949 +0000 UTC m=+4056.389964174" observedRunningTime="2026-01-22 06:26:11.564252247 +0000 UTC m=+4057.647740462" watchObservedRunningTime="2026-01-22 06:26:11.569142937 +0000 UTC m=+4057.652631152" Jan 22 06:26:11 crc kubenswrapper[4814]: I0122 06:26:11.589704 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9zmz7" podStartSLOduration=4.148166888 podStartE2EDuration="8.589683427s" podCreationTimestamp="2026-01-22 06:26:03 +0000 UTC" firstStartedPulling="2026-01-22 06:26:06.490187606 +0000 UTC m=+4052.573675821" lastFinishedPulling="2026-01-22 06:26:10.931704105 +0000 UTC m=+4057.015192360" observedRunningTime="2026-01-22 06:26:11.583849168 +0000 UTC m=+4057.667337393" watchObservedRunningTime="2026-01-22 06:26:11.589683427 +0000 UTC m=+4057.673171642" Jan 22 06:26:14 crc kubenswrapper[4814]: I0122 06:26:14.021665 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:14 crc kubenswrapper[4814]: I0122 06:26:14.022014 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:14 crc kubenswrapper[4814]: I0122 06:26:14.024725 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:14 crc 
kubenswrapper[4814]: I0122 06:26:14.024786 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:14 crc kubenswrapper[4814]: I0122 06:26:14.367386 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:15 crc kubenswrapper[4814]: I0122 06:26:15.370128 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-vc7lg" podUID="bf8cade3-ac9f-4b23-8592-12437480b4d2" containerName="registry-server" probeResult="failure" output=< Jan 22 06:26:15 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 06:26:15 crc kubenswrapper[4814]: > Jan 22 06:26:24 crc kubenswrapper[4814]: I0122 06:26:24.109760 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:24 crc kubenswrapper[4814]: I0122 06:26:24.125537 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:24 crc kubenswrapper[4814]: I0122 06:26:24.183203 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9zmz7"] Jan 22 06:26:24 crc kubenswrapper[4814]: I0122 06:26:24.207518 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:24 crc kubenswrapper[4814]: I0122 06:26:24.695243 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9zmz7" podUID="ddaf9ca9-8934-47c3-913d-f6d95c010120" containerName="registry-server" containerID="cri-o://e4878d086d3e0c4bd852290fe758aef916f352b99bb233087204a04f9a50a334" gracePeriod=2 Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.164728 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vc7lg"] Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.353587 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.420059 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rwfmd\" (UniqueName: \"kubernetes.io/projected/ddaf9ca9-8934-47c3-913d-f6d95c010120-kube-api-access-rwfmd\") pod \"ddaf9ca9-8934-47c3-913d-f6d95c010120\" (UID: \"ddaf9ca9-8934-47c3-913d-f6d95c010120\") " Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.420448 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddaf9ca9-8934-47c3-913d-f6d95c010120-catalog-content\") pod \"ddaf9ca9-8934-47c3-913d-f6d95c010120\" (UID: \"ddaf9ca9-8934-47c3-913d-f6d95c010120\") " Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.420714 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddaf9ca9-8934-47c3-913d-f6d95c010120-utilities\") pod \"ddaf9ca9-8934-47c3-913d-f6d95c010120\" (UID: \"ddaf9ca9-8934-47c3-913d-f6d95c010120\") " Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.421858 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddaf9ca9-8934-47c3-913d-f6d95c010120-utilities" (OuterVolumeSpecName: "utilities") pod "ddaf9ca9-8934-47c3-913d-f6d95c010120" (UID: "ddaf9ca9-8934-47c3-913d-f6d95c010120"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.437102 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddaf9ca9-8934-47c3-913d-f6d95c010120-kube-api-access-rwfmd" (OuterVolumeSpecName: "kube-api-access-rwfmd") pod "ddaf9ca9-8934-47c3-913d-f6d95c010120" (UID: "ddaf9ca9-8934-47c3-913d-f6d95c010120"). InnerVolumeSpecName "kube-api-access-rwfmd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.444825 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddaf9ca9-8934-47c3-913d-f6d95c010120-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ddaf9ca9-8934-47c3-913d-f6d95c010120" (UID: "ddaf9ca9-8934-47c3-913d-f6d95c010120"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.523929 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddaf9ca9-8934-47c3-913d-f6d95c010120-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.523975 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rwfmd\" (UniqueName: \"kubernetes.io/projected/ddaf9ca9-8934-47c3-913d-f6d95c010120-kube-api-access-rwfmd\") on node \"crc\" DevicePath \"\"" Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.523988 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddaf9ca9-8934-47c3-913d-f6d95c010120-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.706995 4814 generic.go:334] "Generic (PLEG): container finished" podID="ddaf9ca9-8934-47c3-913d-f6d95c010120" containerID="e4878d086d3e0c4bd852290fe758aef916f352b99bb233087204a04f9a50a334" exitCode=0 Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.707310 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vc7lg" podUID="bf8cade3-ac9f-4b23-8592-12437480b4d2" containerName="registry-server" containerID="cri-o://bf4fe6d911d761073d85bc3eb209b6792eb846f433e8f15a67d32cd754196e8e" gracePeriod=2 Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.707848 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9zmz7" event={"ID":"ddaf9ca9-8934-47c3-913d-f6d95c010120","Type":"ContainerDied","Data":"e4878d086d3e0c4bd852290fe758aef916f352b99bb233087204a04f9a50a334"} Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.707894 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9zmz7" event={"ID":"ddaf9ca9-8934-47c3-913d-f6d95c010120","Type":"ContainerDied","Data":"85a294c360aa315c11ebf65074de0bfd9402d191fca49a663b8c7dce58da4b43"} Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.707920 4814 scope.go:117] "RemoveContainer" containerID="e4878d086d3e0c4bd852290fe758aef916f352b99bb233087204a04f9a50a334" Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.707894 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9zmz7" Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.737956 4814 scope.go:117] "RemoveContainer" containerID="77b00802d66c01c584b19f427265e1e80a89d013a7f356e1d84358b900ed6ef0" Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.755174 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9zmz7"] Jan 22 06:26:25 crc kubenswrapper[4814]: I0122 06:26:25.766010 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9zmz7"] Jan 22 06:26:26 crc kubenswrapper[4814]: I0122 06:26:26.358199 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddaf9ca9-8934-47c3-913d-f6d95c010120" path="/var/lib/kubelet/pods/ddaf9ca9-8934-47c3-913d-f6d95c010120/volumes" Jan 22 06:26:26 crc kubenswrapper[4814]: I0122 06:26:26.388128 4814 scope.go:117] "RemoveContainer" containerID="4e9ea95cd32a21cc2a54aa7a834a8f9c573ed479892f8a1da6c27283d5d1eda3" Jan 22 06:26:26 crc kubenswrapper[4814]: I0122 06:26:26.651558 4814 scope.go:117] "RemoveContainer" containerID="e4878d086d3e0c4bd852290fe758aef916f352b99bb233087204a04f9a50a334" Jan 22 06:26:26 crc kubenswrapper[4814]: E0122 06:26:26.652226 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4878d086d3e0c4bd852290fe758aef916f352b99bb233087204a04f9a50a334\": container with ID starting with e4878d086d3e0c4bd852290fe758aef916f352b99bb233087204a04f9a50a334 not found: ID does not exist" containerID="e4878d086d3e0c4bd852290fe758aef916f352b99bb233087204a04f9a50a334" Jan 22 06:26:26 crc kubenswrapper[4814]: I0122 06:26:26.652288 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4878d086d3e0c4bd852290fe758aef916f352b99bb233087204a04f9a50a334"} err="failed to get container status \"e4878d086d3e0c4bd852290fe758aef916f352b99bb233087204a04f9a50a334\": rpc error: code = NotFound desc = could not find container \"e4878d086d3e0c4bd852290fe758aef916f352b99bb233087204a04f9a50a334\": container with ID starting with e4878d086d3e0c4bd852290fe758aef916f352b99bb233087204a04f9a50a334 not found: ID does not exist" Jan 22 06:26:26 crc kubenswrapper[4814]: I0122 06:26:26.652312 4814 scope.go:117] "RemoveContainer" containerID="77b00802d66c01c584b19f427265e1e80a89d013a7f356e1d84358b900ed6ef0" Jan 22 06:26:26 crc kubenswrapper[4814]: E0122 06:26:26.652793 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77b00802d66c01c584b19f427265e1e80a89d013a7f356e1d84358b900ed6ef0\": container with ID starting with 77b00802d66c01c584b19f427265e1e80a89d013a7f356e1d84358b900ed6ef0 not found: ID does not exist" containerID="77b00802d66c01c584b19f427265e1e80a89d013a7f356e1d84358b900ed6ef0" Jan 22 06:26:26 crc kubenswrapper[4814]: I0122 06:26:26.652855 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77b00802d66c01c584b19f427265e1e80a89d013a7f356e1d84358b900ed6ef0"} err="failed to get container status \"77b00802d66c01c584b19f427265e1e80a89d013a7f356e1d84358b900ed6ef0\": rpc error: code = NotFound desc = could not find container \"77b00802d66c01c584b19f427265e1e80a89d013a7f356e1d84358b900ed6ef0\": container with ID starting with 77b00802d66c01c584b19f427265e1e80a89d013a7f356e1d84358b900ed6ef0 not found: ID does not exist" Jan 22 06:26:26 crc kubenswrapper[4814]: I0122 
06:26:26.652890 4814 scope.go:117] "RemoveContainer" containerID="4e9ea95cd32a21cc2a54aa7a834a8f9c573ed479892f8a1da6c27283d5d1eda3" Jan 22 06:26:26 crc kubenswrapper[4814]: E0122 06:26:26.653390 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e9ea95cd32a21cc2a54aa7a834a8f9c573ed479892f8a1da6c27283d5d1eda3\": container with ID starting with 4e9ea95cd32a21cc2a54aa7a834a8f9c573ed479892f8a1da6c27283d5d1eda3 not found: ID does not exist" containerID="4e9ea95cd32a21cc2a54aa7a834a8f9c573ed479892f8a1da6c27283d5d1eda3" Jan 22 06:26:26 crc kubenswrapper[4814]: I0122 06:26:26.653434 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e9ea95cd32a21cc2a54aa7a834a8f9c573ed479892f8a1da6c27283d5d1eda3"} err="failed to get container status \"4e9ea95cd32a21cc2a54aa7a834a8f9c573ed479892f8a1da6c27283d5d1eda3\": rpc error: code = NotFound desc = could not find container \"4e9ea95cd32a21cc2a54aa7a834a8f9c573ed479892f8a1da6c27283d5d1eda3\": container with ID starting with 4e9ea95cd32a21cc2a54aa7a834a8f9c573ed479892f8a1da6c27283d5d1eda3 not found: ID does not exist" Jan 22 06:26:26 crc kubenswrapper[4814]: I0122 06:26:26.716375 4814 generic.go:334] "Generic (PLEG): container finished" podID="bf8cade3-ac9f-4b23-8592-12437480b4d2" containerID="bf4fe6d911d761073d85bc3eb209b6792eb846f433e8f15a67d32cd754196e8e" exitCode=0 Jan 22 06:26:26 crc kubenswrapper[4814]: I0122 06:26:26.716441 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vc7lg" event={"ID":"bf8cade3-ac9f-4b23-8592-12437480b4d2","Type":"ContainerDied","Data":"bf4fe6d911d761073d85bc3eb209b6792eb846f433e8f15a67d32cd754196e8e"} Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.247757 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.362451 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf8cade3-ac9f-4b23-8592-12437480b4d2-catalog-content\") pod \"bf8cade3-ac9f-4b23-8592-12437480b4d2\" (UID: \"bf8cade3-ac9f-4b23-8592-12437480b4d2\") " Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.362553 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msw4d\" (UniqueName: \"kubernetes.io/projected/bf8cade3-ac9f-4b23-8592-12437480b4d2-kube-api-access-msw4d\") pod \"bf8cade3-ac9f-4b23-8592-12437480b4d2\" (UID: \"bf8cade3-ac9f-4b23-8592-12437480b4d2\") " Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.362590 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf8cade3-ac9f-4b23-8592-12437480b4d2-utilities\") pod \"bf8cade3-ac9f-4b23-8592-12437480b4d2\" (UID: \"bf8cade3-ac9f-4b23-8592-12437480b4d2\") " Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.363424 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf8cade3-ac9f-4b23-8592-12437480b4d2-utilities" (OuterVolumeSpecName: "utilities") pod "bf8cade3-ac9f-4b23-8592-12437480b4d2" (UID: "bf8cade3-ac9f-4b23-8592-12437480b4d2"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.380119 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf8cade3-ac9f-4b23-8592-12437480b4d2-kube-api-access-msw4d" (OuterVolumeSpecName: "kube-api-access-msw4d") pod "bf8cade3-ac9f-4b23-8592-12437480b4d2" (UID: "bf8cade3-ac9f-4b23-8592-12437480b4d2"). InnerVolumeSpecName "kube-api-access-msw4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.428667 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf8cade3-ac9f-4b23-8592-12437480b4d2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bf8cade3-ac9f-4b23-8592-12437480b4d2" (UID: "bf8cade3-ac9f-4b23-8592-12437480b4d2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.465587 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msw4d\" (UniqueName: \"kubernetes.io/projected/bf8cade3-ac9f-4b23-8592-12437480b4d2-kube-api-access-msw4d\") on node \"crc\" DevicePath \"\"" Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.465623 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf8cade3-ac9f-4b23-8592-12437480b4d2-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.465646 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf8cade3-ac9f-4b23-8592-12437480b4d2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.729646 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vc7lg" event={"ID":"bf8cade3-ac9f-4b23-8592-12437480b4d2","Type":"ContainerDied","Data":"563fa14b1ca5abbaeb83c49b2e8427da28427a03440569f1a1c791a259a36537"} Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.729701 4814 scope.go:117] "RemoveContainer" containerID="bf4fe6d911d761073d85bc3eb209b6792eb846f433e8f15a67d32cd754196e8e" Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.729971 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vc7lg" Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.776181 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vc7lg"] Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.777009 4814 scope.go:117] "RemoveContainer" containerID="07171b68ae64353cf7d089655d923ef4f64979f094c74f752e6c9bd0719cafd1" Jan 22 06:26:27 crc kubenswrapper[4814]: I0122 06:26:27.787139 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vc7lg"] Jan 22 06:26:28 crc kubenswrapper[4814]: I0122 06:26:28.366580 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf8cade3-ac9f-4b23-8592-12437480b4d2" path="/var/lib/kubelet/pods/bf8cade3-ac9f-4b23-8592-12437480b4d2/volumes" Jan 22 06:26:28 crc kubenswrapper[4814]: I0122 06:26:28.395422 4814 scope.go:117] "RemoveContainer" containerID="2919e83a3e98102f34d7eda44eef0091bfd4ea23ec6b77f8282db469c2cd899a" Jan 22 06:26:49 crc kubenswrapper[4814]: I0122 06:26:49.613724 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:26:49 crc kubenswrapper[4814]: I0122 06:26:49.614823 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:27:19 crc kubenswrapper[4814]: I0122 06:27:19.617933 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:27:19 crc kubenswrapper[4814]: I0122 06:27:19.618978 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:27:49 crc kubenswrapper[4814]: I0122 06:27:49.614498 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:27:49 crc kubenswrapper[4814]: I0122 06:27:49.614919 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:27:49 crc kubenswrapper[4814]: I0122 06:27:49.614968 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 06:27:49 crc kubenswrapper[4814]: I0122 06:27:49.616099 4814 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fbd583bbb1b911fef503ea01e1ff3718f0a5c2f03ad3f36dfb8c2b61334ec8ef"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:27:49 crc kubenswrapper[4814]: I0122 06:27:49.616151 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://fbd583bbb1b911fef503ea01e1ff3718f0a5c2f03ad3f36dfb8c2b61334ec8ef" gracePeriod=600 Jan 22 06:27:50 crc kubenswrapper[4814]: I0122 06:27:50.549926 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="fbd583bbb1b911fef503ea01e1ff3718f0a5c2f03ad3f36dfb8c2b61334ec8ef" exitCode=0 Jan 22 06:27:50 crc kubenswrapper[4814]: I0122 06:27:50.550001 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"fbd583bbb1b911fef503ea01e1ff3718f0a5c2f03ad3f36dfb8c2b61334ec8ef"} Jan 22 06:27:50 crc kubenswrapper[4814]: I0122 06:27:50.550512 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b"} Jan 22 06:27:50 crc kubenswrapper[4814]: I0122 06:27:50.550538 4814 scope.go:117] "RemoveContainer" containerID="74216c7eff46dc65678e6e9b096aadcf786f61fc1a0ec7064b790a90dab3e6f1" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.122381 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rlsql"] Jan 22 06:28:32 crc kubenswrapper[4814]: E0122 06:28:32.123470 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf8cade3-ac9f-4b23-8592-12437480b4d2" containerName="extract-content" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.123490 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf8cade3-ac9f-4b23-8592-12437480b4d2" containerName="extract-content" Jan 22 06:28:32 crc kubenswrapper[4814]: E0122 06:28:32.123509 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf8cade3-ac9f-4b23-8592-12437480b4d2" containerName="registry-server" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.123518 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf8cade3-ac9f-4b23-8592-12437480b4d2" containerName="registry-server" Jan 22 06:28:32 crc kubenswrapper[4814]: E0122 06:28:32.123550 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddaf9ca9-8934-47c3-913d-f6d95c010120" containerName="extract-content" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.123567 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddaf9ca9-8934-47c3-913d-f6d95c010120" containerName="extract-content" Jan 22 06:28:32 crc kubenswrapper[4814]: E0122 06:28:32.123579 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddaf9ca9-8934-47c3-913d-f6d95c010120" containerName="registry-server" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.123587 4814 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="ddaf9ca9-8934-47c3-913d-f6d95c010120" containerName="registry-server" Jan 22 06:28:32 crc kubenswrapper[4814]: E0122 06:28:32.123606 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf8cade3-ac9f-4b23-8592-12437480b4d2" containerName="extract-utilities" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.123614 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf8cade3-ac9f-4b23-8592-12437480b4d2" containerName="extract-utilities" Jan 22 06:28:32 crc kubenswrapper[4814]: E0122 06:28:32.123748 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddaf9ca9-8934-47c3-913d-f6d95c010120" containerName="extract-utilities" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.123759 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddaf9ca9-8934-47c3-913d-f6d95c010120" containerName="extract-utilities" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.124028 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddaf9ca9-8934-47c3-913d-f6d95c010120" containerName="registry-server" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.124048 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf8cade3-ac9f-4b23-8592-12437480b4d2" containerName="registry-server" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.126889 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.138252 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rlsql"] Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.295124 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59sqj\" (UniqueName: \"kubernetes.io/projected/2a3725ef-3535-4668-b625-0fcaee01ff58-kube-api-access-59sqj\") pod \"certified-operators-rlsql\" (UID: \"2a3725ef-3535-4668-b625-0fcaee01ff58\") " pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.295391 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a3725ef-3535-4668-b625-0fcaee01ff58-utilities\") pod \"certified-operators-rlsql\" (UID: \"2a3725ef-3535-4668-b625-0fcaee01ff58\") " pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.295943 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a3725ef-3535-4668-b625-0fcaee01ff58-catalog-content\") pod \"certified-operators-rlsql\" (UID: \"2a3725ef-3535-4668-b625-0fcaee01ff58\") " pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.397468 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59sqj\" (UniqueName: \"kubernetes.io/projected/2a3725ef-3535-4668-b625-0fcaee01ff58-kube-api-access-59sqj\") pod \"certified-operators-rlsql\" (UID: \"2a3725ef-3535-4668-b625-0fcaee01ff58\") " pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.397537 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/2a3725ef-3535-4668-b625-0fcaee01ff58-utilities\") pod \"certified-operators-rlsql\" (UID: \"2a3725ef-3535-4668-b625-0fcaee01ff58\") " pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.397759 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a3725ef-3535-4668-b625-0fcaee01ff58-catalog-content\") pod \"certified-operators-rlsql\" (UID: \"2a3725ef-3535-4668-b625-0fcaee01ff58\") " pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.398024 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a3725ef-3535-4668-b625-0fcaee01ff58-utilities\") pod \"certified-operators-rlsql\" (UID: \"2a3725ef-3535-4668-b625-0fcaee01ff58\") " pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.399081 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a3725ef-3535-4668-b625-0fcaee01ff58-catalog-content\") pod \"certified-operators-rlsql\" (UID: \"2a3725ef-3535-4668-b625-0fcaee01ff58\") " pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.476161 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59sqj\" (UniqueName: \"kubernetes.io/projected/2a3725ef-3535-4668-b625-0fcaee01ff58-kube-api-access-59sqj\") pod \"certified-operators-rlsql\" (UID: \"2a3725ef-3535-4668-b625-0fcaee01ff58\") " pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:32 crc kubenswrapper[4814]: I0122 06:28:32.750465 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:33 crc kubenswrapper[4814]: I0122 06:28:33.236689 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rlsql"] Jan 22 06:28:34 crc kubenswrapper[4814]: I0122 06:28:34.012709 4814 generic.go:334] "Generic (PLEG): container finished" podID="2a3725ef-3535-4668-b625-0fcaee01ff58" containerID="0ed85471192c22d8dcbd49416aff651f2d2651386c0726886574e82fb8cbfb7b" exitCode=0 Jan 22 06:28:34 crc kubenswrapper[4814]: I0122 06:28:34.013434 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlsql" event={"ID":"2a3725ef-3535-4668-b625-0fcaee01ff58","Type":"ContainerDied","Data":"0ed85471192c22d8dcbd49416aff651f2d2651386c0726886574e82fb8cbfb7b"} Jan 22 06:28:34 crc kubenswrapper[4814]: I0122 06:28:34.014706 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlsql" event={"ID":"2a3725ef-3535-4668-b625-0fcaee01ff58","Type":"ContainerStarted","Data":"bf47addd11dbef57bdd6ab1b18cee992c1cba2aa28ea10e41c19b2570c96aec0"} Jan 22 06:28:37 crc kubenswrapper[4814]: I0122 06:28:37.067279 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlsql" event={"ID":"2a3725ef-3535-4668-b625-0fcaee01ff58","Type":"ContainerStarted","Data":"9af32f798539b224cd80854359a4403ed735865d1753bec15ebc0542f5554987"} Jan 22 06:28:39 crc kubenswrapper[4814]: I0122 06:28:39.081782 4814 generic.go:334] "Generic (PLEG): container finished" podID="2a3725ef-3535-4668-b625-0fcaee01ff58" containerID="9af32f798539b224cd80854359a4403ed735865d1753bec15ebc0542f5554987" exitCode=0 Jan 22 06:28:39 crc kubenswrapper[4814]: I0122 06:28:39.081859 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlsql" event={"ID":"2a3725ef-3535-4668-b625-0fcaee01ff58","Type":"ContainerDied","Data":"9af32f798539b224cd80854359a4403ed735865d1753bec15ebc0542f5554987"} Jan 22 06:28:40 crc kubenswrapper[4814]: I0122 06:28:40.105095 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlsql" event={"ID":"2a3725ef-3535-4668-b625-0fcaee01ff58","Type":"ContainerStarted","Data":"4ca7b2a52042a2753ba9b23d9838d9748b1150b5f02946166f9a66bedc2334cd"} Jan 22 06:28:40 crc kubenswrapper[4814]: I0122 06:28:40.146427 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rlsql" podStartSLOduration=2.5761407419999998 podStartE2EDuration="8.146405993s" podCreationTimestamp="2026-01-22 06:28:32 +0000 UTC" firstStartedPulling="2026-01-22 06:28:34.016484635 +0000 UTC m=+4200.099972840" lastFinishedPulling="2026-01-22 06:28:39.586749866 +0000 UTC m=+4205.670238091" observedRunningTime="2026-01-22 06:28:40.137789983 +0000 UTC m=+4206.221278228" watchObservedRunningTime="2026-01-22 06:28:40.146405993 +0000 UTC m=+4206.229894198" Jan 22 06:28:42 crc kubenswrapper[4814]: I0122 06:28:42.751501 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:42 crc kubenswrapper[4814]: I0122 06:28:42.751848 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:43 crc kubenswrapper[4814]: I0122 06:28:43.805326 4814 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-marketplace/certified-operators-rlsql" podUID="2a3725ef-3535-4668-b625-0fcaee01ff58" containerName="registry-server" probeResult="failure" output=< Jan 22 06:28:43 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 06:28:43 crc kubenswrapper[4814]: > Jan 22 06:28:52 crc kubenswrapper[4814]: I0122 06:28:52.823243 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:52 crc kubenswrapper[4814]: I0122 06:28:52.870809 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:53 crc kubenswrapper[4814]: I0122 06:28:53.065425 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rlsql"] Jan 22 06:28:54 crc kubenswrapper[4814]: I0122 06:28:54.238495 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rlsql" podUID="2a3725ef-3535-4668-b625-0fcaee01ff58" containerName="registry-server" containerID="cri-o://4ca7b2a52042a2753ba9b23d9838d9748b1150b5f02946166f9a66bedc2334cd" gracePeriod=2 Jan 22 06:28:54 crc kubenswrapper[4814]: I0122 06:28:54.783677 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:54 crc kubenswrapper[4814]: I0122 06:28:54.978785 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a3725ef-3535-4668-b625-0fcaee01ff58-utilities\") pod \"2a3725ef-3535-4668-b625-0fcaee01ff58\" (UID: \"2a3725ef-3535-4668-b625-0fcaee01ff58\") " Jan 22 06:28:54 crc kubenswrapper[4814]: I0122 06:28:54.978976 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a3725ef-3535-4668-b625-0fcaee01ff58-catalog-content\") pod \"2a3725ef-3535-4668-b625-0fcaee01ff58\" (UID: \"2a3725ef-3535-4668-b625-0fcaee01ff58\") " Jan 22 06:28:54 crc kubenswrapper[4814]: I0122 06:28:54.979121 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59sqj\" (UniqueName: \"kubernetes.io/projected/2a3725ef-3535-4668-b625-0fcaee01ff58-kube-api-access-59sqj\") pod \"2a3725ef-3535-4668-b625-0fcaee01ff58\" (UID: \"2a3725ef-3535-4668-b625-0fcaee01ff58\") " Jan 22 06:28:54 crc kubenswrapper[4814]: I0122 06:28:54.980244 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a3725ef-3535-4668-b625-0fcaee01ff58-utilities" (OuterVolumeSpecName: "utilities") pod "2a3725ef-3535-4668-b625-0fcaee01ff58" (UID: "2a3725ef-3535-4668-b625-0fcaee01ff58"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:28:54 crc kubenswrapper[4814]: I0122 06:28:54.993847 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a3725ef-3535-4668-b625-0fcaee01ff58-kube-api-access-59sqj" (OuterVolumeSpecName: "kube-api-access-59sqj") pod "2a3725ef-3535-4668-b625-0fcaee01ff58" (UID: "2a3725ef-3535-4668-b625-0fcaee01ff58"). InnerVolumeSpecName "kube-api-access-59sqj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.053109 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a3725ef-3535-4668-b625-0fcaee01ff58-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2a3725ef-3535-4668-b625-0fcaee01ff58" (UID: "2a3725ef-3535-4668-b625-0fcaee01ff58"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.081825 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a3725ef-3535-4668-b625-0fcaee01ff58-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.081870 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a3725ef-3535-4668-b625-0fcaee01ff58-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.081883 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59sqj\" (UniqueName: \"kubernetes.io/projected/2a3725ef-3535-4668-b625-0fcaee01ff58-kube-api-access-59sqj\") on node \"crc\" DevicePath \"\"" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.249926 4814 generic.go:334] "Generic (PLEG): container finished" podID="2a3725ef-3535-4668-b625-0fcaee01ff58" containerID="4ca7b2a52042a2753ba9b23d9838d9748b1150b5f02946166f9a66bedc2334cd" exitCode=0 Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.250004 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlsql" event={"ID":"2a3725ef-3535-4668-b625-0fcaee01ff58","Type":"ContainerDied","Data":"4ca7b2a52042a2753ba9b23d9838d9748b1150b5f02946166f9a66bedc2334cd"} Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.250072 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlsql" event={"ID":"2a3725ef-3535-4668-b625-0fcaee01ff58","Type":"ContainerDied","Data":"bf47addd11dbef57bdd6ab1b18cee992c1cba2aa28ea10e41c19b2570c96aec0"} Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.250096 4814 scope.go:117] "RemoveContainer" containerID="4ca7b2a52042a2753ba9b23d9838d9748b1150b5f02946166f9a66bedc2334cd" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.251187 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rlsql" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.293986 4814 scope.go:117] "RemoveContainer" containerID="9af32f798539b224cd80854359a4403ed735865d1753bec15ebc0542f5554987" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.302243 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rlsql"] Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.325030 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rlsql"] Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.337898 4814 scope.go:117] "RemoveContainer" containerID="0ed85471192c22d8dcbd49416aff651f2d2651386c0726886574e82fb8cbfb7b" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.387112 4814 scope.go:117] "RemoveContainer" containerID="4ca7b2a52042a2753ba9b23d9838d9748b1150b5f02946166f9a66bedc2334cd" Jan 22 06:28:55 crc kubenswrapper[4814]: E0122 06:28:55.388286 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ca7b2a52042a2753ba9b23d9838d9748b1150b5f02946166f9a66bedc2334cd\": container with ID starting with 4ca7b2a52042a2753ba9b23d9838d9748b1150b5f02946166f9a66bedc2334cd not found: ID does not exist" containerID="4ca7b2a52042a2753ba9b23d9838d9748b1150b5f02946166f9a66bedc2334cd" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.388365 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ca7b2a52042a2753ba9b23d9838d9748b1150b5f02946166f9a66bedc2334cd"} err="failed to get container status \"4ca7b2a52042a2753ba9b23d9838d9748b1150b5f02946166f9a66bedc2334cd\": rpc error: code = NotFound desc = could not find container \"4ca7b2a52042a2753ba9b23d9838d9748b1150b5f02946166f9a66bedc2334cd\": container with ID starting with 4ca7b2a52042a2753ba9b23d9838d9748b1150b5f02946166f9a66bedc2334cd not found: ID does not exist" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.388432 4814 scope.go:117] "RemoveContainer" containerID="9af32f798539b224cd80854359a4403ed735865d1753bec15ebc0542f5554987" Jan 22 06:28:55 crc kubenswrapper[4814]: E0122 06:28:55.388922 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9af32f798539b224cd80854359a4403ed735865d1753bec15ebc0542f5554987\": container with ID starting with 9af32f798539b224cd80854359a4403ed735865d1753bec15ebc0542f5554987 not found: ID does not exist" containerID="9af32f798539b224cd80854359a4403ed735865d1753bec15ebc0542f5554987" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.388966 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9af32f798539b224cd80854359a4403ed735865d1753bec15ebc0542f5554987"} err="failed to get container status \"9af32f798539b224cd80854359a4403ed735865d1753bec15ebc0542f5554987\": rpc error: code = NotFound desc = could not find container \"9af32f798539b224cd80854359a4403ed735865d1753bec15ebc0542f5554987\": container with ID starting with 9af32f798539b224cd80854359a4403ed735865d1753bec15ebc0542f5554987 not found: ID does not exist" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.389001 4814 scope.go:117] "RemoveContainer" containerID="0ed85471192c22d8dcbd49416aff651f2d2651386c0726886574e82fb8cbfb7b" Jan 22 06:28:55 crc kubenswrapper[4814]: E0122 06:28:55.389469 4814 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0ed85471192c22d8dcbd49416aff651f2d2651386c0726886574e82fb8cbfb7b\": container with ID starting with 0ed85471192c22d8dcbd49416aff651f2d2651386c0726886574e82fb8cbfb7b not found: ID does not exist" containerID="0ed85471192c22d8dcbd49416aff651f2d2651386c0726886574e82fb8cbfb7b" Jan 22 06:28:55 crc kubenswrapper[4814]: I0122 06:28:55.389513 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ed85471192c22d8dcbd49416aff651f2d2651386c0726886574e82fb8cbfb7b"} err="failed to get container status \"0ed85471192c22d8dcbd49416aff651f2d2651386c0726886574e82fb8cbfb7b\": rpc error: code = NotFound desc = could not find container \"0ed85471192c22d8dcbd49416aff651f2d2651386c0726886574e82fb8cbfb7b\": container with ID starting with 0ed85471192c22d8dcbd49416aff651f2d2651386c0726886574e82fb8cbfb7b not found: ID does not exist" Jan 22 06:28:56 crc kubenswrapper[4814]: I0122 06:28:56.357506 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a3725ef-3535-4668-b625-0fcaee01ff58" path="/var/lib/kubelet/pods/2a3725ef-3535-4668-b625-0fcaee01ff58/volumes" Jan 22 06:29:49 crc kubenswrapper[4814]: I0122 06:29:49.613917 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:29:49 crc kubenswrapper[4814]: I0122 06:29:49.614486 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.203429 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x"] Jan 22 06:30:00 crc kubenswrapper[4814]: E0122 06:30:00.204375 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a3725ef-3535-4668-b625-0fcaee01ff58" containerName="registry-server" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.204389 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a3725ef-3535-4668-b625-0fcaee01ff58" containerName="registry-server" Jan 22 06:30:00 crc kubenswrapper[4814]: E0122 06:30:00.204401 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a3725ef-3535-4668-b625-0fcaee01ff58" containerName="extract-content" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.204408 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a3725ef-3535-4668-b625-0fcaee01ff58" containerName="extract-content" Jan 22 06:30:00 crc kubenswrapper[4814]: E0122 06:30:00.204432 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a3725ef-3535-4668-b625-0fcaee01ff58" containerName="extract-utilities" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.204442 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a3725ef-3535-4668-b625-0fcaee01ff58" containerName="extract-utilities" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.204668 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a3725ef-3535-4668-b625-0fcaee01ff58" containerName="registry-server" Jan 22 06:30:00 crc 
kubenswrapper[4814]: I0122 06:30:00.205326 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.209687 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.211411 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.231495 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x"] Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.366744 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-config-volume\") pod \"collect-profiles-29484390-skh9x\" (UID: \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.366848 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-secret-volume\") pod \"collect-profiles-29484390-skh9x\" (UID: \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.366877 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78mx8\" (UniqueName: \"kubernetes.io/projected/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-kube-api-access-78mx8\") pod \"collect-profiles-29484390-skh9x\" (UID: \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.468774 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-secret-volume\") pod \"collect-profiles-29484390-skh9x\" (UID: \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.468834 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78mx8\" (UniqueName: \"kubernetes.io/projected/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-kube-api-access-78mx8\") pod \"collect-profiles-29484390-skh9x\" (UID: \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.468979 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-config-volume\") pod \"collect-profiles-29484390-skh9x\" (UID: \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.470681 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" 
(UniqueName: \"kubernetes.io/configmap/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-config-volume\") pod \"collect-profiles-29484390-skh9x\" (UID: \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.474402 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-secret-volume\") pod \"collect-profiles-29484390-skh9x\" (UID: \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.488500 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78mx8\" (UniqueName: \"kubernetes.io/projected/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-kube-api-access-78mx8\") pod \"collect-profiles-29484390-skh9x\" (UID: \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" Jan 22 06:30:00 crc kubenswrapper[4814]: I0122 06:30:00.531484 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" Jan 22 06:30:00 crc kubenswrapper[4814]: W0122 06:30:00.991699 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfc26a8a4_ff94_436b_bcaf_19d9087c7a15.slice/crio-433096ca7811707cab9cbc12bdf987a3df9a668a8ed371e9f048aaa5551f7d12 WatchSource:0}: Error finding container 433096ca7811707cab9cbc12bdf987a3df9a668a8ed371e9f048aaa5551f7d12: Status 404 returned error can't find the container with id 433096ca7811707cab9cbc12bdf987a3df9a668a8ed371e9f048aaa5551f7d12 Jan 22 06:30:01 crc kubenswrapper[4814]: I0122 06:30:01.008744 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x"] Jan 22 06:30:01 crc kubenswrapper[4814]: I0122 06:30:01.873160 4814 generic.go:334] "Generic (PLEG): container finished" podID="fc26a8a4-ff94-436b-bcaf-19d9087c7a15" containerID="2e66639dead14e8236b406800d4e4ec83d37b9fdbf0fafdc0a5dd68d9a885ddd" exitCode=0 Jan 22 06:30:01 crc kubenswrapper[4814]: I0122 06:30:01.873372 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" event={"ID":"fc26a8a4-ff94-436b-bcaf-19d9087c7a15","Type":"ContainerDied","Data":"2e66639dead14e8236b406800d4e4ec83d37b9fdbf0fafdc0a5dd68d9a885ddd"} Jan 22 06:30:01 crc kubenswrapper[4814]: I0122 06:30:01.874724 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" event={"ID":"fc26a8a4-ff94-436b-bcaf-19d9087c7a15","Type":"ContainerStarted","Data":"433096ca7811707cab9cbc12bdf987a3df9a668a8ed371e9f048aaa5551f7d12"} Jan 22 06:30:03 crc kubenswrapper[4814]: I0122 06:30:03.264244 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" Jan 22 06:30:03 crc kubenswrapper[4814]: I0122 06:30:03.322203 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78mx8\" (UniqueName: \"kubernetes.io/projected/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-kube-api-access-78mx8\") pod \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\" (UID: \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\") " Jan 22 06:30:03 crc kubenswrapper[4814]: I0122 06:30:03.322564 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-config-volume\") pod \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\" (UID: \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\") " Jan 22 06:30:03 crc kubenswrapper[4814]: I0122 06:30:03.322918 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-secret-volume\") pod \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\" (UID: \"fc26a8a4-ff94-436b-bcaf-19d9087c7a15\") " Jan 22 06:30:03 crc kubenswrapper[4814]: I0122 06:30:03.323146 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-config-volume" (OuterVolumeSpecName: "config-volume") pod "fc26a8a4-ff94-436b-bcaf-19d9087c7a15" (UID: "fc26a8a4-ff94-436b-bcaf-19d9087c7a15"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:30:03 crc kubenswrapper[4814]: I0122 06:30:03.324228 4814 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 06:30:03 crc kubenswrapper[4814]: I0122 06:30:03.330736 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "fc26a8a4-ff94-436b-bcaf-19d9087c7a15" (UID: "fc26a8a4-ff94-436b-bcaf-19d9087c7a15"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:30:03 crc kubenswrapper[4814]: I0122 06:30:03.340470 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-kube-api-access-78mx8" (OuterVolumeSpecName: "kube-api-access-78mx8") pod "fc26a8a4-ff94-436b-bcaf-19d9087c7a15" (UID: "fc26a8a4-ff94-436b-bcaf-19d9087c7a15"). InnerVolumeSpecName "kube-api-access-78mx8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:30:03 crc kubenswrapper[4814]: I0122 06:30:03.427098 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78mx8\" (UniqueName: \"kubernetes.io/projected/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-kube-api-access-78mx8\") on node \"crc\" DevicePath \"\"" Jan 22 06:30:03 crc kubenswrapper[4814]: I0122 06:30:03.427134 4814 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fc26a8a4-ff94-436b-bcaf-19d9087c7a15-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 06:30:03 crc kubenswrapper[4814]: I0122 06:30:03.894418 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" event={"ID":"fc26a8a4-ff94-436b-bcaf-19d9087c7a15","Type":"ContainerDied","Data":"433096ca7811707cab9cbc12bdf987a3df9a668a8ed371e9f048aaa5551f7d12"} Jan 22 06:30:03 crc kubenswrapper[4814]: I0122 06:30:03.894844 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="433096ca7811707cab9cbc12bdf987a3df9a668a8ed371e9f048aaa5551f7d12" Jan 22 06:30:03 crc kubenswrapper[4814]: I0122 06:30:03.894465 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484390-skh9x" Jan 22 06:30:04 crc kubenswrapper[4814]: I0122 06:30:04.374372 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7"] Jan 22 06:30:04 crc kubenswrapper[4814]: I0122 06:30:04.376783 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484345-rstm7"] Jan 22 06:30:06 crc kubenswrapper[4814]: I0122 06:30:06.355910 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2" path="/var/lib/kubelet/pods/9f3d08ae-5cc6-4bb5-b1b2-b20296f3d0e2/volumes" Jan 22 06:30:19 crc kubenswrapper[4814]: I0122 06:30:19.613591 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:30:19 crc kubenswrapper[4814]: I0122 06:30:19.614265 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:30:44 crc kubenswrapper[4814]: I0122 06:30:44.552454 4814 scope.go:117] "RemoveContainer" containerID="5ca5b842c285f83ff6c889c29c7b7e3fd8aa7b512c89391b3126656c1c613105" Jan 22 06:30:49 crc kubenswrapper[4814]: I0122 06:30:49.614228 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:30:49 crc kubenswrapper[4814]: I0122 06:30:49.614951 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:30:49 crc kubenswrapper[4814]: I0122 06:30:49.615022 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 06:30:49 crc kubenswrapper[4814]: I0122 06:30:49.616065 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:30:49 crc kubenswrapper[4814]: I0122 06:30:49.616140 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" gracePeriod=600 Jan 22 06:30:49 crc kubenswrapper[4814]: E0122 06:30:49.739137 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:30:50 crc kubenswrapper[4814]: I0122 06:30:50.361518 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" exitCode=0 Jan 22 06:30:50 crc kubenswrapper[4814]: I0122 06:30:50.364575 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b"} Jan 22 06:30:50 crc kubenswrapper[4814]: I0122 06:30:50.364800 4814 scope.go:117] "RemoveContainer" containerID="fbd583bbb1b911fef503ea01e1ff3718f0a5c2f03ad3f36dfb8c2b61334ec8ef" Jan 22 06:30:50 crc kubenswrapper[4814]: I0122 06:30:50.365511 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:30:50 crc kubenswrapper[4814]: E0122 06:30:50.365792 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:31:03 crc kubenswrapper[4814]: I0122 06:31:03.343573 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:31:03 crc kubenswrapper[4814]: E0122 06:31:03.344345 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:31:14 crc kubenswrapper[4814]: I0122 06:31:14.350616 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:31:14 crc kubenswrapper[4814]: E0122 06:31:14.351260 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:31:28 crc kubenswrapper[4814]: I0122 06:31:28.344715 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:31:28 crc kubenswrapper[4814]: E0122 06:31:28.345688 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:31:42 crc kubenswrapper[4814]: I0122 06:31:42.344291 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:31:42 crc kubenswrapper[4814]: E0122 06:31:42.345594 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:31:54 crc kubenswrapper[4814]: I0122 06:31:54.350064 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:31:54 crc kubenswrapper[4814]: E0122 06:31:54.350954 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:32:08 crc kubenswrapper[4814]: I0122 06:32:08.344369 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:32:08 crc kubenswrapper[4814]: E0122 06:32:08.347351 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:32:21 crc kubenswrapper[4814]: I0122 06:32:21.343551 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:32:21 crc kubenswrapper[4814]: E0122 06:32:21.345393 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:32:35 crc kubenswrapper[4814]: I0122 06:32:35.344021 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:32:35 crc kubenswrapper[4814]: E0122 06:32:35.345036 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:32:46 crc kubenswrapper[4814]: I0122 06:32:46.343882 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:32:46 crc kubenswrapper[4814]: E0122 06:32:46.345011 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:32:59 crc kubenswrapper[4814]: I0122 06:32:59.345051 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:32:59 crc kubenswrapper[4814]: E0122 06:32:59.345833 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:33:12 crc kubenswrapper[4814]: I0122 06:33:12.344543 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:33:12 crc kubenswrapper[4814]: E0122 06:33:12.345679 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:33:23 crc kubenswrapper[4814]: I0122 06:33:23.343734 4814 
scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:33:23 crc kubenswrapper[4814]: E0122 06:33:23.344485 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:33:38 crc kubenswrapper[4814]: I0122 06:33:38.343941 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:33:38 crc kubenswrapper[4814]: E0122 06:33:38.344691 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:33:49 crc kubenswrapper[4814]: I0122 06:33:49.344363 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:33:49 crc kubenswrapper[4814]: E0122 06:33:49.345129 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:34:01 crc kubenswrapper[4814]: I0122 06:34:01.343478 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:34:01 crc kubenswrapper[4814]: E0122 06:34:01.344335 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:34:16 crc kubenswrapper[4814]: I0122 06:34:16.345585 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:34:16 crc kubenswrapper[4814]: E0122 06:34:16.346857 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:34:31 crc kubenswrapper[4814]: I0122 06:34:31.344203 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:34:31 crc kubenswrapper[4814]: E0122 06:34:31.345146 4814 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:34:43 crc kubenswrapper[4814]: I0122 06:34:43.344144 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:34:43 crc kubenswrapper[4814]: E0122 06:34:43.344871 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:34:58 crc kubenswrapper[4814]: I0122 06:34:58.343859 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:34:58 crc kubenswrapper[4814]: E0122 06:34:58.344599 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:35:11 crc kubenswrapper[4814]: I0122 06:35:11.344005 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:35:11 crc kubenswrapper[4814]: E0122 06:35:11.344761 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.389929 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wxc7s"] Jan 22 06:35:15 crc kubenswrapper[4814]: E0122 06:35:15.392489 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc26a8a4-ff94-436b-bcaf-19d9087c7a15" containerName="collect-profiles" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.392659 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc26a8a4-ff94-436b-bcaf-19d9087c7a15" containerName="collect-profiles" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.393121 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc26a8a4-ff94-436b-bcaf-19d9087c7a15" containerName="collect-profiles" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.402251 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.432762 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wxc7s"] Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.509848 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a18b4693-d304-465c-8ce3-496e03a6cd89-utilities\") pod \"redhat-operators-wxc7s\" (UID: \"a18b4693-d304-465c-8ce3-496e03a6cd89\") " pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.510296 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wprhb\" (UniqueName: \"kubernetes.io/projected/a18b4693-d304-465c-8ce3-496e03a6cd89-kube-api-access-wprhb\") pod \"redhat-operators-wxc7s\" (UID: \"a18b4693-d304-465c-8ce3-496e03a6cd89\") " pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.510331 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a18b4693-d304-465c-8ce3-496e03a6cd89-catalog-content\") pod \"redhat-operators-wxc7s\" (UID: \"a18b4693-d304-465c-8ce3-496e03a6cd89\") " pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.612527 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wprhb\" (UniqueName: \"kubernetes.io/projected/a18b4693-d304-465c-8ce3-496e03a6cd89-kube-api-access-wprhb\") pod \"redhat-operators-wxc7s\" (UID: \"a18b4693-d304-465c-8ce3-496e03a6cd89\") " pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.612812 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a18b4693-d304-465c-8ce3-496e03a6cd89-catalog-content\") pod \"redhat-operators-wxc7s\" (UID: \"a18b4693-d304-465c-8ce3-496e03a6cd89\") " pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.613272 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a18b4693-d304-465c-8ce3-496e03a6cd89-catalog-content\") pod \"redhat-operators-wxc7s\" (UID: \"a18b4693-d304-465c-8ce3-496e03a6cd89\") " pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.613604 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a18b4693-d304-465c-8ce3-496e03a6cd89-utilities\") pod \"redhat-operators-wxc7s\" (UID: \"a18b4693-d304-465c-8ce3-496e03a6cd89\") " pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.613996 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a18b4693-d304-465c-8ce3-496e03a6cd89-utilities\") pod \"redhat-operators-wxc7s\" (UID: \"a18b4693-d304-465c-8ce3-496e03a6cd89\") " pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.637487 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-wprhb\" (UniqueName: \"kubernetes.io/projected/a18b4693-d304-465c-8ce3-496e03a6cd89-kube-api-access-wprhb\") pod \"redhat-operators-wxc7s\" (UID: \"a18b4693-d304-465c-8ce3-496e03a6cd89\") " pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:15 crc kubenswrapper[4814]: I0122 06:35:15.720795 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:16 crc kubenswrapper[4814]: I0122 06:35:16.237194 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wxc7s"] Jan 22 06:35:16 crc kubenswrapper[4814]: I0122 06:35:16.995513 4814 generic.go:334] "Generic (PLEG): container finished" podID="a18b4693-d304-465c-8ce3-496e03a6cd89" containerID="83dd7fc0b7e1f37a3493e5d344a5b3a9392b11e3ef0142a7475397cd94cab486" exitCode=0 Jan 22 06:35:16 crc kubenswrapper[4814]: I0122 06:35:16.995614 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxc7s" event={"ID":"a18b4693-d304-465c-8ce3-496e03a6cd89","Type":"ContainerDied","Data":"83dd7fc0b7e1f37a3493e5d344a5b3a9392b11e3ef0142a7475397cd94cab486"} Jan 22 06:35:16 crc kubenswrapper[4814]: I0122 06:35:16.995793 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxc7s" event={"ID":"a18b4693-d304-465c-8ce3-496e03a6cd89","Type":"ContainerStarted","Data":"d89a62e4f1a5885af67d1075cf4caf2a435a2e77ac2d076e9b0cf8b42f00f173"} Jan 22 06:35:16 crc kubenswrapper[4814]: I0122 06:35:16.998903 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 06:35:18 crc kubenswrapper[4814]: I0122 06:35:18.008114 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxc7s" event={"ID":"a18b4693-d304-465c-8ce3-496e03a6cd89","Type":"ContainerStarted","Data":"4e14631b0e7a092e001f6aea6be82037e86b302ce5261449fe5ae72720c8c3e6"} Jan 22 06:35:22 crc kubenswrapper[4814]: I0122 06:35:22.047894 4814 generic.go:334] "Generic (PLEG): container finished" podID="a18b4693-d304-465c-8ce3-496e03a6cd89" containerID="4e14631b0e7a092e001f6aea6be82037e86b302ce5261449fe5ae72720c8c3e6" exitCode=0 Jan 22 06:35:22 crc kubenswrapper[4814]: I0122 06:35:22.048000 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxc7s" event={"ID":"a18b4693-d304-465c-8ce3-496e03a6cd89","Type":"ContainerDied","Data":"4e14631b0e7a092e001f6aea6be82037e86b302ce5261449fe5ae72720c8c3e6"} Jan 22 06:35:23 crc kubenswrapper[4814]: I0122 06:35:23.056760 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxc7s" event={"ID":"a18b4693-d304-465c-8ce3-496e03a6cd89","Type":"ContainerStarted","Data":"0eec3207e79a7af9315c36f783860a079cd914352f92e152ecf966e8039ef45a"} Jan 22 06:35:23 crc kubenswrapper[4814]: I0122 06:35:23.075357 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wxc7s" podStartSLOduration=2.633866049 podStartE2EDuration="8.075342514s" podCreationTimestamp="2026-01-22 06:35:15 +0000 UTC" firstStartedPulling="2026-01-22 06:35:16.998693078 +0000 UTC m=+4603.082181293" lastFinishedPulling="2026-01-22 06:35:22.440169543 +0000 UTC m=+4608.523657758" observedRunningTime="2026-01-22 06:35:23.074553799 +0000 UTC m=+4609.158042014" watchObservedRunningTime="2026-01-22 06:35:23.075342514 +0000 UTC m=+4609.158830729" Jan 22 06:35:24 crc 
kubenswrapper[4814]: I0122 06:35:24.348906 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:35:24 crc kubenswrapper[4814]: E0122 06:35:24.349361 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:35:25 crc kubenswrapper[4814]: I0122 06:35:25.722075 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:25 crc kubenswrapper[4814]: I0122 06:35:25.723489 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:26 crc kubenswrapper[4814]: I0122 06:35:26.771854 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wxc7s" podUID="a18b4693-d304-465c-8ce3-496e03a6cd89" containerName="registry-server" probeResult="failure" output=< Jan 22 06:35:26 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 06:35:26 crc kubenswrapper[4814]: > Jan 22 06:35:36 crc kubenswrapper[4814]: I0122 06:35:36.789151 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wxc7s" podUID="a18b4693-d304-465c-8ce3-496e03a6cd89" containerName="registry-server" probeResult="failure" output=< Jan 22 06:35:36 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 06:35:36 crc kubenswrapper[4814]: > Jan 22 06:35:37 crc kubenswrapper[4814]: I0122 06:35:37.344612 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:35:37 crc kubenswrapper[4814]: E0122 06:35:37.345209 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:35:45 crc kubenswrapper[4814]: I0122 06:35:45.787945 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:45 crc kubenswrapper[4814]: I0122 06:35:45.867313 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:46 crc kubenswrapper[4814]: I0122 06:35:46.586926 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wxc7s"] Jan 22 06:35:47 crc kubenswrapper[4814]: I0122 06:35:47.320301 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wxc7s" podUID="a18b4693-d304-465c-8ce3-496e03a6cd89" containerName="registry-server" containerID="cri-o://0eec3207e79a7af9315c36f783860a079cd914352f92e152ecf966e8039ef45a" gracePeriod=2 Jan 22 06:35:47 crc kubenswrapper[4814]: I0122 06:35:47.938192 4814 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.097058 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wprhb\" (UniqueName: \"kubernetes.io/projected/a18b4693-d304-465c-8ce3-496e03a6cd89-kube-api-access-wprhb\") pod \"a18b4693-d304-465c-8ce3-496e03a6cd89\" (UID: \"a18b4693-d304-465c-8ce3-496e03a6cd89\") " Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.097179 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a18b4693-d304-465c-8ce3-496e03a6cd89-utilities\") pod \"a18b4693-d304-465c-8ce3-496e03a6cd89\" (UID: \"a18b4693-d304-465c-8ce3-496e03a6cd89\") " Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.097389 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a18b4693-d304-465c-8ce3-496e03a6cd89-catalog-content\") pod \"a18b4693-d304-465c-8ce3-496e03a6cd89\" (UID: \"a18b4693-d304-465c-8ce3-496e03a6cd89\") " Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.098144 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a18b4693-d304-465c-8ce3-496e03a6cd89-utilities" (OuterVolumeSpecName: "utilities") pod "a18b4693-d304-465c-8ce3-496e03a6cd89" (UID: "a18b4693-d304-465c-8ce3-496e03a6cd89"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.105951 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a18b4693-d304-465c-8ce3-496e03a6cd89-kube-api-access-wprhb" (OuterVolumeSpecName: "kube-api-access-wprhb") pod "a18b4693-d304-465c-8ce3-496e03a6cd89" (UID: "a18b4693-d304-465c-8ce3-496e03a6cd89"). InnerVolumeSpecName "kube-api-access-wprhb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.199406 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wprhb\" (UniqueName: \"kubernetes.io/projected/a18b4693-d304-465c-8ce3-496e03a6cd89-kube-api-access-wprhb\") on node \"crc\" DevicePath \"\"" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.199450 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a18b4693-d304-465c-8ce3-496e03a6cd89-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.225505 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a18b4693-d304-465c-8ce3-496e03a6cd89-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a18b4693-d304-465c-8ce3-496e03a6cd89" (UID: "a18b4693-d304-465c-8ce3-496e03a6cd89"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.300875 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a18b4693-d304-465c-8ce3-496e03a6cd89-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.328856 4814 generic.go:334] "Generic (PLEG): container finished" podID="a18b4693-d304-465c-8ce3-496e03a6cd89" containerID="0eec3207e79a7af9315c36f783860a079cd914352f92e152ecf966e8039ef45a" exitCode=0 Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.328892 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxc7s" event={"ID":"a18b4693-d304-465c-8ce3-496e03a6cd89","Type":"ContainerDied","Data":"0eec3207e79a7af9315c36f783860a079cd914352f92e152ecf966e8039ef45a"} Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.328942 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wxc7s" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.329757 4814 scope.go:117] "RemoveContainer" containerID="0eec3207e79a7af9315c36f783860a079cd914352f92e152ecf966e8039ef45a" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.329688 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxc7s" event={"ID":"a18b4693-d304-465c-8ce3-496e03a6cd89","Type":"ContainerDied","Data":"d89a62e4f1a5885af67d1075cf4caf2a435a2e77ac2d076e9b0cf8b42f00f173"} Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.367306 4814 scope.go:117] "RemoveContainer" containerID="4e14631b0e7a092e001f6aea6be82037e86b302ce5261449fe5ae72720c8c3e6" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.368665 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wxc7s"] Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.378376 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wxc7s"] Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.386840 4814 scope.go:117] "RemoveContainer" containerID="83dd7fc0b7e1f37a3493e5d344a5b3a9392b11e3ef0142a7475397cd94cab486" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.442383 4814 scope.go:117] "RemoveContainer" containerID="0eec3207e79a7af9315c36f783860a079cd914352f92e152ecf966e8039ef45a" Jan 22 06:35:48 crc kubenswrapper[4814]: E0122 06:35:48.442851 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0eec3207e79a7af9315c36f783860a079cd914352f92e152ecf966e8039ef45a\": container with ID starting with 0eec3207e79a7af9315c36f783860a079cd914352f92e152ecf966e8039ef45a not found: ID does not exist" containerID="0eec3207e79a7af9315c36f783860a079cd914352f92e152ecf966e8039ef45a" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.442887 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0eec3207e79a7af9315c36f783860a079cd914352f92e152ecf966e8039ef45a"} err="failed to get container status \"0eec3207e79a7af9315c36f783860a079cd914352f92e152ecf966e8039ef45a\": rpc error: code = NotFound desc = could not find container \"0eec3207e79a7af9315c36f783860a079cd914352f92e152ecf966e8039ef45a\": container with ID starting with 0eec3207e79a7af9315c36f783860a079cd914352f92e152ecf966e8039ef45a not found: ID does not exist" Jan 22 06:35:48 crc 
kubenswrapper[4814]: I0122 06:35:48.442907 4814 scope.go:117] "RemoveContainer" containerID="4e14631b0e7a092e001f6aea6be82037e86b302ce5261449fe5ae72720c8c3e6" Jan 22 06:35:48 crc kubenswrapper[4814]: E0122 06:35:48.443283 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e14631b0e7a092e001f6aea6be82037e86b302ce5261449fe5ae72720c8c3e6\": container with ID starting with 4e14631b0e7a092e001f6aea6be82037e86b302ce5261449fe5ae72720c8c3e6 not found: ID does not exist" containerID="4e14631b0e7a092e001f6aea6be82037e86b302ce5261449fe5ae72720c8c3e6" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.443303 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e14631b0e7a092e001f6aea6be82037e86b302ce5261449fe5ae72720c8c3e6"} err="failed to get container status \"4e14631b0e7a092e001f6aea6be82037e86b302ce5261449fe5ae72720c8c3e6\": rpc error: code = NotFound desc = could not find container \"4e14631b0e7a092e001f6aea6be82037e86b302ce5261449fe5ae72720c8c3e6\": container with ID starting with 4e14631b0e7a092e001f6aea6be82037e86b302ce5261449fe5ae72720c8c3e6 not found: ID does not exist" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.443318 4814 scope.go:117] "RemoveContainer" containerID="83dd7fc0b7e1f37a3493e5d344a5b3a9392b11e3ef0142a7475397cd94cab486" Jan 22 06:35:48 crc kubenswrapper[4814]: E0122 06:35:48.443778 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83dd7fc0b7e1f37a3493e5d344a5b3a9392b11e3ef0142a7475397cd94cab486\": container with ID starting with 83dd7fc0b7e1f37a3493e5d344a5b3a9392b11e3ef0142a7475397cd94cab486 not found: ID does not exist" containerID="83dd7fc0b7e1f37a3493e5d344a5b3a9392b11e3ef0142a7475397cd94cab486" Jan 22 06:35:48 crc kubenswrapper[4814]: I0122 06:35:48.443829 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83dd7fc0b7e1f37a3493e5d344a5b3a9392b11e3ef0142a7475397cd94cab486"} err="failed to get container status \"83dd7fc0b7e1f37a3493e5d344a5b3a9392b11e3ef0142a7475397cd94cab486\": rpc error: code = NotFound desc = could not find container \"83dd7fc0b7e1f37a3493e5d344a5b3a9392b11e3ef0142a7475397cd94cab486\": container with ID starting with 83dd7fc0b7e1f37a3493e5d344a5b3a9392b11e3ef0142a7475397cd94cab486 not found: ID does not exist" Jan 22 06:35:50 crc kubenswrapper[4814]: I0122 06:35:50.355244 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a18b4693-d304-465c-8ce3-496e03a6cd89" path="/var/lib/kubelet/pods/a18b4693-d304-465c-8ce3-496e03a6cd89/volumes" Jan 22 06:35:52 crc kubenswrapper[4814]: I0122 06:35:52.343655 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:35:53 crc kubenswrapper[4814]: I0122 06:35:53.384927 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"eb9b0c2de3b38d3ab0e0c16bb8149d436c95e80cf9967278219839ea7c9d10aa"} Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.196415 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jdzkq"] Jan 22 06:36:33 crc kubenswrapper[4814]: E0122 06:36:33.197744 4814 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="a18b4693-d304-465c-8ce3-496e03a6cd89" containerName="extract-utilities" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.197769 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a18b4693-d304-465c-8ce3-496e03a6cd89" containerName="extract-utilities" Jan 22 06:36:33 crc kubenswrapper[4814]: E0122 06:36:33.197802 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a18b4693-d304-465c-8ce3-496e03a6cd89" containerName="registry-server" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.197816 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a18b4693-d304-465c-8ce3-496e03a6cd89" containerName="registry-server" Jan 22 06:36:33 crc kubenswrapper[4814]: E0122 06:36:33.197855 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a18b4693-d304-465c-8ce3-496e03a6cd89" containerName="extract-content" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.197869 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a18b4693-d304-465c-8ce3-496e03a6cd89" containerName="extract-content" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.198118 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a18b4693-d304-465c-8ce3-496e03a6cd89" containerName="registry-server" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.199709 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.213097 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jdzkq"] Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.219557 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdmck\" (UniqueName: \"kubernetes.io/projected/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-kube-api-access-rdmck\") pod \"redhat-marketplace-jdzkq\" (UID: \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\") " pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.219661 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-utilities\") pod \"redhat-marketplace-jdzkq\" (UID: \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\") " pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.219772 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-catalog-content\") pod \"redhat-marketplace-jdzkq\" (UID: \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\") " pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.320933 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-catalog-content\") pod \"redhat-marketplace-jdzkq\" (UID: \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\") " pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.321058 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdmck\" (UniqueName: \"kubernetes.io/projected/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-kube-api-access-rdmck\") pod 
\"redhat-marketplace-jdzkq\" (UID: \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\") " pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.321094 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-utilities\") pod \"redhat-marketplace-jdzkq\" (UID: \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\") " pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.321455 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-catalog-content\") pod \"redhat-marketplace-jdzkq\" (UID: \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\") " pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.321550 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-utilities\") pod \"redhat-marketplace-jdzkq\" (UID: \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\") " pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.352644 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdmck\" (UniqueName: \"kubernetes.io/projected/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-kube-api-access-rdmck\") pod \"redhat-marketplace-jdzkq\" (UID: \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\") " pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:33 crc kubenswrapper[4814]: I0122 06:36:33.535418 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:34 crc kubenswrapper[4814]: I0122 06:36:34.140659 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jdzkq"] Jan 22 06:36:34 crc kubenswrapper[4814]: I0122 06:36:34.763130 4814 generic.go:334] "Generic (PLEG): container finished" podID="31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" containerID="93d239aa5a9f11481a3896776f13bd16bb6b04a5b1b832fd60ebe41bba590009" exitCode=0 Jan 22 06:36:34 crc kubenswrapper[4814]: I0122 06:36:34.763235 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jdzkq" event={"ID":"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9","Type":"ContainerDied","Data":"93d239aa5a9f11481a3896776f13bd16bb6b04a5b1b832fd60ebe41bba590009"} Jan 22 06:36:34 crc kubenswrapper[4814]: I0122 06:36:34.763720 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jdzkq" event={"ID":"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9","Type":"ContainerStarted","Data":"00d4ba1d3817f3ca5d08679f875f498d2107c5ba99da752371110da7cd07f38d"} Jan 22 06:36:36 crc kubenswrapper[4814]: I0122 06:36:36.778983 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jdzkq" event={"ID":"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9","Type":"ContainerStarted","Data":"7669b9186cc062b0732ae247b054ac5692323983cdaeb7b8ec031fe91e794df8"} Jan 22 06:36:37 crc kubenswrapper[4814]: I0122 06:36:37.789401 4814 generic.go:334] "Generic (PLEG): container finished" podID="31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" containerID="7669b9186cc062b0732ae247b054ac5692323983cdaeb7b8ec031fe91e794df8" exitCode=0 Jan 22 06:36:37 crc kubenswrapper[4814]: I0122 06:36:37.789476 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jdzkq" event={"ID":"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9","Type":"ContainerDied","Data":"7669b9186cc062b0732ae247b054ac5692323983cdaeb7b8ec031fe91e794df8"} Jan 22 06:36:38 crc kubenswrapper[4814]: I0122 06:36:38.832958 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jdzkq" event={"ID":"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9","Type":"ContainerStarted","Data":"9fb0ee25c7372c6cd24e39663eefaca6e1568d9935b730d007280dd579ce1098"} Jan 22 06:36:38 crc kubenswrapper[4814]: I0122 06:36:38.857902 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jdzkq" podStartSLOduration=2.232914668 podStartE2EDuration="5.857881365s" podCreationTimestamp="2026-01-22 06:36:33 +0000 UTC" firstStartedPulling="2026-01-22 06:36:34.76491679 +0000 UTC m=+4680.848405005" lastFinishedPulling="2026-01-22 06:36:38.389883457 +0000 UTC m=+4684.473371702" observedRunningTime="2026-01-22 06:36:38.855187942 +0000 UTC m=+4684.938676157" watchObservedRunningTime="2026-01-22 06:36:38.857881365 +0000 UTC m=+4684.941369580" Jan 22 06:36:43 crc kubenswrapper[4814]: I0122 06:36:43.537413 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:43 crc kubenswrapper[4814]: I0122 06:36:43.538042 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:43 crc kubenswrapper[4814]: I0122 06:36:43.604251 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:43 crc kubenswrapper[4814]: I0122 06:36:43.956327 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:44 crc kubenswrapper[4814]: I0122 06:36:44.013242 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jdzkq"] Jan 22 06:36:45 crc kubenswrapper[4814]: I0122 06:36:45.909343 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jdzkq" podUID="31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" containerName="registry-server" containerID="cri-o://9fb0ee25c7372c6cd24e39663eefaca6e1568d9935b730d007280dd579ce1098" gracePeriod=2 Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.405278 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.521400 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-utilities\") pod \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\" (UID: \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\") " Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.521551 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rdmck\" (UniqueName: \"kubernetes.io/projected/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-kube-api-access-rdmck\") pod \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\" (UID: \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\") " Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.521589 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-catalog-content\") pod \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\" (UID: \"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9\") " Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.526922 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-utilities" (OuterVolumeSpecName: "utilities") pod "31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" (UID: "31e19dfe-ba34-4928-a9bf-2c1c7271a3c9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.527857 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-kube-api-access-rdmck" (OuterVolumeSpecName: "kube-api-access-rdmck") pod "31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" (UID: "31e19dfe-ba34-4928-a9bf-2c1c7271a3c9"). InnerVolumeSpecName "kube-api-access-rdmck". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.543616 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" (UID: "31e19dfe-ba34-4928-a9bf-2c1c7271a3c9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.624027 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rdmck\" (UniqueName: \"kubernetes.io/projected/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-kube-api-access-rdmck\") on node \"crc\" DevicePath \"\"" Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.624335 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.624346 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.920607 4814 generic.go:334] "Generic (PLEG): container finished" podID="31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" containerID="9fb0ee25c7372c6cd24e39663eefaca6e1568d9935b730d007280dd579ce1098" exitCode=0 Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.920729 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jdzkq" Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.920734 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jdzkq" event={"ID":"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9","Type":"ContainerDied","Data":"9fb0ee25c7372c6cd24e39663eefaca6e1568d9935b730d007280dd579ce1098"} Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.920804 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jdzkq" event={"ID":"31e19dfe-ba34-4928-a9bf-2c1c7271a3c9","Type":"ContainerDied","Data":"00d4ba1d3817f3ca5d08679f875f498d2107c5ba99da752371110da7cd07f38d"} Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.920839 4814 scope.go:117] "RemoveContainer" containerID="9fb0ee25c7372c6cd24e39663eefaca6e1568d9935b730d007280dd579ce1098" Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.950544 4814 scope.go:117] "RemoveContainer" containerID="7669b9186cc062b0732ae247b054ac5692323983cdaeb7b8ec031fe91e794df8" Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.982035 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jdzkq"] Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.991603 4814 scope.go:117] "RemoveContainer" containerID="93d239aa5a9f11481a3896776f13bd16bb6b04a5b1b832fd60ebe41bba590009" Jan 22 06:36:46 crc kubenswrapper[4814]: I0122 06:36:46.994138 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jdzkq"] Jan 22 06:36:47 crc kubenswrapper[4814]: I0122 06:36:47.031008 4814 scope.go:117] "RemoveContainer" containerID="9fb0ee25c7372c6cd24e39663eefaca6e1568d9935b730d007280dd579ce1098" Jan 22 06:36:47 crc kubenswrapper[4814]: E0122 06:36:47.031332 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fb0ee25c7372c6cd24e39663eefaca6e1568d9935b730d007280dd579ce1098\": container with ID starting with 9fb0ee25c7372c6cd24e39663eefaca6e1568d9935b730d007280dd579ce1098 not found: ID does not exist" containerID="9fb0ee25c7372c6cd24e39663eefaca6e1568d9935b730d007280dd579ce1098" Jan 22 06:36:47 crc kubenswrapper[4814]: I0122 06:36:47.031453 4814 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fb0ee25c7372c6cd24e39663eefaca6e1568d9935b730d007280dd579ce1098"} err="failed to get container status \"9fb0ee25c7372c6cd24e39663eefaca6e1568d9935b730d007280dd579ce1098\": rpc error: code = NotFound desc = could not find container \"9fb0ee25c7372c6cd24e39663eefaca6e1568d9935b730d007280dd579ce1098\": container with ID starting with 9fb0ee25c7372c6cd24e39663eefaca6e1568d9935b730d007280dd579ce1098 not found: ID does not exist"
Jan 22 06:36:47 crc kubenswrapper[4814]: I0122 06:36:47.031490 4814 scope.go:117] "RemoveContainer" containerID="7669b9186cc062b0732ae247b054ac5692323983cdaeb7b8ec031fe91e794df8"
Jan 22 06:36:47 crc kubenswrapper[4814]: E0122 06:36:47.032190 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7669b9186cc062b0732ae247b054ac5692323983cdaeb7b8ec031fe91e794df8\": container with ID starting with 7669b9186cc062b0732ae247b054ac5692323983cdaeb7b8ec031fe91e794df8 not found: ID does not exist" containerID="7669b9186cc062b0732ae247b054ac5692323983cdaeb7b8ec031fe91e794df8"
Jan 22 06:36:47 crc kubenswrapper[4814]: I0122 06:36:47.032235 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7669b9186cc062b0732ae247b054ac5692323983cdaeb7b8ec031fe91e794df8"} err="failed to get container status \"7669b9186cc062b0732ae247b054ac5692323983cdaeb7b8ec031fe91e794df8\": rpc error: code = NotFound desc = could not find container \"7669b9186cc062b0732ae247b054ac5692323983cdaeb7b8ec031fe91e794df8\": container with ID starting with 7669b9186cc062b0732ae247b054ac5692323983cdaeb7b8ec031fe91e794df8 not found: ID does not exist"
Jan 22 06:36:47 crc kubenswrapper[4814]: I0122 06:36:47.032261 4814 scope.go:117] "RemoveContainer" containerID="93d239aa5a9f11481a3896776f13bd16bb6b04a5b1b832fd60ebe41bba590009"
Jan 22 06:36:47 crc kubenswrapper[4814]: E0122 06:36:47.033503 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93d239aa5a9f11481a3896776f13bd16bb6b04a5b1b832fd60ebe41bba590009\": container with ID starting with 93d239aa5a9f11481a3896776f13bd16bb6b04a5b1b832fd60ebe41bba590009 not found: ID does not exist" containerID="93d239aa5a9f11481a3896776f13bd16bb6b04a5b1b832fd60ebe41bba590009"
Jan 22 06:36:47 crc kubenswrapper[4814]: I0122 06:36:47.033572 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93d239aa5a9f11481a3896776f13bd16bb6b04a5b1b832fd60ebe41bba590009"} err="failed to get container status \"93d239aa5a9f11481a3896776f13bd16bb6b04a5b1b832fd60ebe41bba590009\": rpc error: code = NotFound desc = could not find container \"93d239aa5a9f11481a3896776f13bd16bb6b04a5b1b832fd60ebe41bba590009\": container with ID starting with 93d239aa5a9f11481a3896776f13bd16bb6b04a5b1b832fd60ebe41bba590009 not found: ID does not exist"
Jan 22 06:36:48 crc kubenswrapper[4814]: I0122 06:36:48.354879 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" path="/var/lib/kubelet/pods/31e19dfe-ba34-4928-a9bf-2c1c7271a3c9/volumes"
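
The three NotFound errors above are benign: by the time the kubelet re-queried the runtime for each container's status, CRI-O had already removed the containers along with the pod sandbox, so there was nothing left to delete. A minimal Go sketch of the idempotent-cleanup pattern at work here, assuming only the gRPC NotFound code shown in the log (function names are illustrative, not the kubelet's actual code):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeContainer stands in for a CRI RemoveContainer/ContainerStatus call
    // against a container the runtime has already pruned.
    func removeContainer(id string) error {
        return status.Errorf(codes.NotFound, "could not find container %q", id)
    }

    // cleanupContainer treats NotFound as success: the container is already
    // gone, so deletion is idempotent and the sync loop can move on.
    func cleanupContainer(id string) error {
        err := removeContainer(id)
        if status.Code(err) == codes.NotFound {
            return nil // already removed by the runtime; nothing to do
        }
        return err
    }

    func main() {
        // Illustrative truncated ID from the records above.
        if err := cleanupContainer("9fb0ee25"); err != nil {
            fmt.Println("cleanup failed:", err)
            return
        }
        fmt.Println("NotFound treated as already deleted")
    }

Treating NotFound as "already deleted" is what lets the teardown finish with the 06:36:48 "Cleaned up orphaned pod volumes dir" record instead of retrying.
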
Jan 22 06:38:19 crc kubenswrapper[4814]: I0122 06:38:19.614055 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:38:19 crc kubenswrapper[4814]: I0122 06:38:19.615028 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:38:49 crc kubenswrapper[4814]: I0122 06:38:49.614677 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:38:49 crc kubenswrapper[4814]: I0122 06:38:49.615242 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:38:54 crc kubenswrapper[4814]: E0122 06:38:54.139593 4814 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.110:34318->38.102.83.110:39385: write tcp 38.102.83.110:34318->38.102.83.110:39385: write: broken pipe
Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.358297 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tn84j"]
Jan 22 06:39:16 crc kubenswrapper[4814]: E0122 06:39:16.359291 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" containerName="extract-utilities"
Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.359311 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" containerName="extract-utilities"
Jan 22 06:39:16 crc kubenswrapper[4814]: E0122 06:39:16.359346 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" containerName="registry-server"
Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.359358 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" containerName="registry-server"
Jan 22 06:39:16 crc kubenswrapper[4814]: E0122 06:39:16.359389 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" containerName="extract-content"
Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.359400 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" containerName="extract-content"
Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.359781 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="31e19dfe-ba34-4928-a9bf-2c1c7271a3c9" containerName="registry-server"
Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.361988 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.367866 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tn84j"] Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.479161 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7079b780-14d9-4f52-9771-395f2fc10951-utilities\") pod \"certified-operators-tn84j\" (UID: \"7079b780-14d9-4f52-9771-395f2fc10951\") " pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.479286 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmc2t\" (UniqueName: \"kubernetes.io/projected/7079b780-14d9-4f52-9771-395f2fc10951-kube-api-access-wmc2t\") pod \"certified-operators-tn84j\" (UID: \"7079b780-14d9-4f52-9771-395f2fc10951\") " pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.479336 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7079b780-14d9-4f52-9771-395f2fc10951-catalog-content\") pod \"certified-operators-tn84j\" (UID: \"7079b780-14d9-4f52-9771-395f2fc10951\") " pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.581965 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7079b780-14d9-4f52-9771-395f2fc10951-utilities\") pod \"certified-operators-tn84j\" (UID: \"7079b780-14d9-4f52-9771-395f2fc10951\") " pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.582065 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmc2t\" (UniqueName: \"kubernetes.io/projected/7079b780-14d9-4f52-9771-395f2fc10951-kube-api-access-wmc2t\") pod \"certified-operators-tn84j\" (UID: \"7079b780-14d9-4f52-9771-395f2fc10951\") " pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.582108 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7079b780-14d9-4f52-9771-395f2fc10951-catalog-content\") pod \"certified-operators-tn84j\" (UID: \"7079b780-14d9-4f52-9771-395f2fc10951\") " pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.582574 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7079b780-14d9-4f52-9771-395f2fc10951-utilities\") pod \"certified-operators-tn84j\" (UID: \"7079b780-14d9-4f52-9771-395f2fc10951\") " pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.582702 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7079b780-14d9-4f52-9771-395f2fc10951-catalog-content\") pod \"certified-operators-tn84j\" (UID: \"7079b780-14d9-4f52-9771-395f2fc10951\") " pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.602130 4814 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-wmc2t\" (UniqueName: \"kubernetes.io/projected/7079b780-14d9-4f52-9771-395f2fc10951-kube-api-access-wmc2t\") pod \"certified-operators-tn84j\" (UID: \"7079b780-14d9-4f52-9771-395f2fc10951\") " pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:16 crc kubenswrapper[4814]: I0122 06:39:16.688902 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:17 crc kubenswrapper[4814]: I0122 06:39:17.303382 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tn84j"] Jan 22 06:39:17 crc kubenswrapper[4814]: I0122 06:39:17.458599 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn84j" event={"ID":"7079b780-14d9-4f52-9771-395f2fc10951","Type":"ContainerStarted","Data":"0360ebf4f53d19a20f35c6c932ea8f3ac16c35d155581fcf5e603b0bfb45826b"} Jan 22 06:39:18 crc kubenswrapper[4814]: I0122 06:39:18.469434 4814 generic.go:334] "Generic (PLEG): container finished" podID="7079b780-14d9-4f52-9771-395f2fc10951" containerID="3663207aec3ee96fddc270998e6460affa7b4f9eeb1513bddb07660b1e32301f" exitCode=0 Jan 22 06:39:18 crc kubenswrapper[4814]: I0122 06:39:18.469560 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn84j" event={"ID":"7079b780-14d9-4f52-9771-395f2fc10951","Type":"ContainerDied","Data":"3663207aec3ee96fddc270998e6460affa7b4f9eeb1513bddb07660b1e32301f"} Jan 22 06:39:19 crc kubenswrapper[4814]: I0122 06:39:19.484010 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn84j" event={"ID":"7079b780-14d9-4f52-9771-395f2fc10951","Type":"ContainerStarted","Data":"0f891d1ae7e416c81f9a1d1325a5e202ba8333820f2b943391607651ad9b7fce"} Jan 22 06:39:19 crc kubenswrapper[4814]: I0122 06:39:19.613666 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:39:19 crc kubenswrapper[4814]: I0122 06:39:19.613938 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:39:19 crc kubenswrapper[4814]: I0122 06:39:19.614054 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 06:39:19 crc kubenswrapper[4814]: I0122 06:39:19.614995 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"eb9b0c2de3b38d3ab0e0c16bb8149d436c95e80cf9967278219839ea7c9d10aa"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:39:19 crc kubenswrapper[4814]: I0122 06:39:19.615134 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" 
containerName="machine-config-daemon" containerID="cri-o://eb9b0c2de3b38d3ab0e0c16bb8149d436c95e80cf9967278219839ea7c9d10aa" gracePeriod=600 Jan 22 06:39:20 crc kubenswrapper[4814]: I0122 06:39:20.495352 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="eb9b0c2de3b38d3ab0e0c16bb8149d436c95e80cf9967278219839ea7c9d10aa" exitCode=0 Jan 22 06:39:20 crc kubenswrapper[4814]: I0122 06:39:20.495423 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"eb9b0c2de3b38d3ab0e0c16bb8149d436c95e80cf9967278219839ea7c9d10aa"} Jan 22 06:39:20 crc kubenswrapper[4814]: I0122 06:39:20.496685 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68"} Jan 22 06:39:20 crc kubenswrapper[4814]: I0122 06:39:20.496708 4814 scope.go:117] "RemoveContainer" containerID="377f6ad48d18f3942d80d7ed5e0c0cb2eb908ad37916ba4b0daa9c77f094e70b" Jan 22 06:39:20 crc kubenswrapper[4814]: I0122 06:39:20.500102 4814 generic.go:334] "Generic (PLEG): container finished" podID="7079b780-14d9-4f52-9771-395f2fc10951" containerID="0f891d1ae7e416c81f9a1d1325a5e202ba8333820f2b943391607651ad9b7fce" exitCode=0 Jan 22 06:39:20 crc kubenswrapper[4814]: I0122 06:39:20.500141 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn84j" event={"ID":"7079b780-14d9-4f52-9771-395f2fc10951","Type":"ContainerDied","Data":"0f891d1ae7e416c81f9a1d1325a5e202ba8333820f2b943391607651ad9b7fce"} Jan 22 06:39:21 crc kubenswrapper[4814]: I0122 06:39:21.510163 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn84j" event={"ID":"7079b780-14d9-4f52-9771-395f2fc10951","Type":"ContainerStarted","Data":"7f6631f41f577a705cc9a6c2967afe99df72a58bcf5e6d505925e5241eef0155"} Jan 22 06:39:21 crc kubenswrapper[4814]: I0122 06:39:21.539847 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tn84j" podStartSLOduration=3.082745389 podStartE2EDuration="5.539828366s" podCreationTimestamp="2026-01-22 06:39:16 +0000 UTC" firstStartedPulling="2026-01-22 06:39:18.471286606 +0000 UTC m=+4844.554774851" lastFinishedPulling="2026-01-22 06:39:20.928369603 +0000 UTC m=+4847.011857828" observedRunningTime="2026-01-22 06:39:21.533843899 +0000 UTC m=+4847.617332124" watchObservedRunningTime="2026-01-22 06:39:21.539828366 +0000 UTC m=+4847.623316581" Jan 22 06:39:26 crc kubenswrapper[4814]: I0122 06:39:26.689278 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:26 crc kubenswrapper[4814]: I0122 06:39:26.689823 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:26 crc kubenswrapper[4814]: I0122 06:39:26.771437 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:27 crc kubenswrapper[4814]: I0122 06:39:27.621674 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tn84j" Jan 22 
Jan 22 06:39:27 crc kubenswrapper[4814]: I0122 06:39:27.685434 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tn84j"]
Jan 22 06:39:29 crc kubenswrapper[4814]: I0122 06:39:29.599017 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tn84j" podUID="7079b780-14d9-4f52-9771-395f2fc10951" containerName="registry-server" containerID="cri-o://7f6631f41f577a705cc9a6c2967afe99df72a58bcf5e6d505925e5241eef0155" gracePeriod=2
Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.156480 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tn84j"
Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.258257 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmc2t\" (UniqueName: \"kubernetes.io/projected/7079b780-14d9-4f52-9771-395f2fc10951-kube-api-access-wmc2t\") pod \"7079b780-14d9-4f52-9771-395f2fc10951\" (UID: \"7079b780-14d9-4f52-9771-395f2fc10951\") "
Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.258926 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7079b780-14d9-4f52-9771-395f2fc10951-catalog-content\") pod \"7079b780-14d9-4f52-9771-395f2fc10951\" (UID: \"7079b780-14d9-4f52-9771-395f2fc10951\") "
Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.259094 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7079b780-14d9-4f52-9771-395f2fc10951-utilities\") pod \"7079b780-14d9-4f52-9771-395f2fc10951\" (UID: \"7079b780-14d9-4f52-9771-395f2fc10951\") "
Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.260383 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7079b780-14d9-4f52-9771-395f2fc10951-utilities" (OuterVolumeSpecName: "utilities") pod "7079b780-14d9-4f52-9771-395f2fc10951" (UID: "7079b780-14d9-4f52-9771-395f2fc10951"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.287843 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7079b780-14d9-4f52-9771-395f2fc10951-kube-api-access-wmc2t" (OuterVolumeSpecName: "kube-api-access-wmc2t") pod "7079b780-14d9-4f52-9771-395f2fc10951" (UID: "7079b780-14d9-4f52-9771-395f2fc10951"). InnerVolumeSpecName "kube-api-access-wmc2t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.355501 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7079b780-14d9-4f52-9771-395f2fc10951-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7079b780-14d9-4f52-9771-395f2fc10951" (UID: "7079b780-14d9-4f52-9771-395f2fc10951"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.362048 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7079b780-14d9-4f52-9771-395f2fc10951-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.362277 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7079b780-14d9-4f52-9771-395f2fc10951-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.362341 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmc2t\" (UniqueName: \"kubernetes.io/projected/7079b780-14d9-4f52-9771-395f2fc10951-kube-api-access-wmc2t\") on node \"crc\" DevicePath \"\"" Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.607344 4814 generic.go:334] "Generic (PLEG): container finished" podID="7079b780-14d9-4f52-9771-395f2fc10951" containerID="7f6631f41f577a705cc9a6c2967afe99df72a58bcf5e6d505925e5241eef0155" exitCode=0 Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.607384 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn84j" event={"ID":"7079b780-14d9-4f52-9771-395f2fc10951","Type":"ContainerDied","Data":"7f6631f41f577a705cc9a6c2967afe99df72a58bcf5e6d505925e5241eef0155"} Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.607411 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tn84j" event={"ID":"7079b780-14d9-4f52-9771-395f2fc10951","Type":"ContainerDied","Data":"0360ebf4f53d19a20f35c6c932ea8f3ac16c35d155581fcf5e603b0bfb45826b"} Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.607429 4814 scope.go:117] "RemoveContainer" containerID="7f6631f41f577a705cc9a6c2967afe99df72a58bcf5e6d505925e5241eef0155" Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.607553 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tn84j" Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.630720 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tn84j"] Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.634004 4814 scope.go:117] "RemoveContainer" containerID="0f891d1ae7e416c81f9a1d1325a5e202ba8333820f2b943391607651ad9b7fce" Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.638985 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tn84j"] Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.653254 4814 scope.go:117] "RemoveContainer" containerID="3663207aec3ee96fddc270998e6460affa7b4f9eeb1513bddb07660b1e32301f" Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.691573 4814 scope.go:117] "RemoveContainer" containerID="7f6631f41f577a705cc9a6c2967afe99df72a58bcf5e6d505925e5241eef0155" Jan 22 06:39:30 crc kubenswrapper[4814]: E0122 06:39:30.692269 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f6631f41f577a705cc9a6c2967afe99df72a58bcf5e6d505925e5241eef0155\": container with ID starting with 7f6631f41f577a705cc9a6c2967afe99df72a58bcf5e6d505925e5241eef0155 not found: ID does not exist" containerID="7f6631f41f577a705cc9a6c2967afe99df72a58bcf5e6d505925e5241eef0155" Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.692311 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f6631f41f577a705cc9a6c2967afe99df72a58bcf5e6d505925e5241eef0155"} err="failed to get container status \"7f6631f41f577a705cc9a6c2967afe99df72a58bcf5e6d505925e5241eef0155\": rpc error: code = NotFound desc = could not find container \"7f6631f41f577a705cc9a6c2967afe99df72a58bcf5e6d505925e5241eef0155\": container with ID starting with 7f6631f41f577a705cc9a6c2967afe99df72a58bcf5e6d505925e5241eef0155 not found: ID does not exist" Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.692337 4814 scope.go:117] "RemoveContainer" containerID="0f891d1ae7e416c81f9a1d1325a5e202ba8333820f2b943391607651ad9b7fce" Jan 22 06:39:30 crc kubenswrapper[4814]: E0122 06:39:30.692707 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f891d1ae7e416c81f9a1d1325a5e202ba8333820f2b943391607651ad9b7fce\": container with ID starting with 0f891d1ae7e416c81f9a1d1325a5e202ba8333820f2b943391607651ad9b7fce not found: ID does not exist" containerID="0f891d1ae7e416c81f9a1d1325a5e202ba8333820f2b943391607651ad9b7fce" Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.692737 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f891d1ae7e416c81f9a1d1325a5e202ba8333820f2b943391607651ad9b7fce"} err="failed to get container status \"0f891d1ae7e416c81f9a1d1325a5e202ba8333820f2b943391607651ad9b7fce\": rpc error: code = NotFound desc = could not find container \"0f891d1ae7e416c81f9a1d1325a5e202ba8333820f2b943391607651ad9b7fce\": container with ID starting with 0f891d1ae7e416c81f9a1d1325a5e202ba8333820f2b943391607651ad9b7fce not found: ID does not exist" Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.692759 4814 scope.go:117] "RemoveContainer" containerID="3663207aec3ee96fddc270998e6460affa7b4f9eeb1513bddb07660b1e32301f" Jan 22 06:39:30 crc kubenswrapper[4814]: E0122 06:39:30.693051 4814 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"3663207aec3ee96fddc270998e6460affa7b4f9eeb1513bddb07660b1e32301f\": container with ID starting with 3663207aec3ee96fddc270998e6460affa7b4f9eeb1513bddb07660b1e32301f not found: ID does not exist" containerID="3663207aec3ee96fddc270998e6460affa7b4f9eeb1513bddb07660b1e32301f" Jan 22 06:39:30 crc kubenswrapper[4814]: I0122 06:39:30.693104 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3663207aec3ee96fddc270998e6460affa7b4f9eeb1513bddb07660b1e32301f"} err="failed to get container status \"3663207aec3ee96fddc270998e6460affa7b4f9eeb1513bddb07660b1e32301f\": rpc error: code = NotFound desc = could not find container \"3663207aec3ee96fddc270998e6460affa7b4f9eeb1513bddb07660b1e32301f\": container with ID starting with 3663207aec3ee96fddc270998e6460affa7b4f9eeb1513bddb07660b1e32301f not found: ID does not exist" Jan 22 06:39:32 crc kubenswrapper[4814]: I0122 06:39:32.356660 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7079b780-14d9-4f52-9771-395f2fc10951" path="/var/lib/kubelet/pods/7079b780-14d9-4f52-9771-395f2fc10951/volumes" Jan 22 06:41:19 crc kubenswrapper[4814]: I0122 06:41:19.614017 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:41:19 crc kubenswrapper[4814]: I0122 06:41:19.615814 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.132906 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hbzfk"] Jan 22 06:41:27 crc kubenswrapper[4814]: E0122 06:41:27.133657 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7079b780-14d9-4f52-9771-395f2fc10951" containerName="extract-utilities" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.133669 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7079b780-14d9-4f52-9771-395f2fc10951" containerName="extract-utilities" Jan 22 06:41:27 crc kubenswrapper[4814]: E0122 06:41:27.133691 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7079b780-14d9-4f52-9771-395f2fc10951" containerName="extract-content" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.133696 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7079b780-14d9-4f52-9771-395f2fc10951" containerName="extract-content" Jan 22 06:41:27 crc kubenswrapper[4814]: E0122 06:41:27.133716 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7079b780-14d9-4f52-9771-395f2fc10951" containerName="registry-server" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.133722 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="7079b780-14d9-4f52-9771-395f2fc10951" containerName="registry-server" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.133889 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="7079b780-14d9-4f52-9771-395f2fc10951" containerName="registry-server" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 
06:41:27.135146 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.159136 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hbzfk"] Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.250452 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbf5k\" (UniqueName: \"kubernetes.io/projected/073512fe-dbaf-4424-af25-3379588a1876-kube-api-access-qbf5k\") pod \"community-operators-hbzfk\" (UID: \"073512fe-dbaf-4424-af25-3379588a1876\") " pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.250506 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/073512fe-dbaf-4424-af25-3379588a1876-catalog-content\") pod \"community-operators-hbzfk\" (UID: \"073512fe-dbaf-4424-af25-3379588a1876\") " pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.250583 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/073512fe-dbaf-4424-af25-3379588a1876-utilities\") pod \"community-operators-hbzfk\" (UID: \"073512fe-dbaf-4424-af25-3379588a1876\") " pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.352587 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/073512fe-dbaf-4424-af25-3379588a1876-utilities\") pod \"community-operators-hbzfk\" (UID: \"073512fe-dbaf-4424-af25-3379588a1876\") " pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.352762 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbf5k\" (UniqueName: \"kubernetes.io/projected/073512fe-dbaf-4424-af25-3379588a1876-kube-api-access-qbf5k\") pod \"community-operators-hbzfk\" (UID: \"073512fe-dbaf-4424-af25-3379588a1876\") " pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.352785 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/073512fe-dbaf-4424-af25-3379588a1876-catalog-content\") pod \"community-operators-hbzfk\" (UID: \"073512fe-dbaf-4424-af25-3379588a1876\") " pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.353225 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/073512fe-dbaf-4424-af25-3379588a1876-catalog-content\") pod \"community-operators-hbzfk\" (UID: \"073512fe-dbaf-4424-af25-3379588a1876\") " pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.353427 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/073512fe-dbaf-4424-af25-3379588a1876-utilities\") pod \"community-operators-hbzfk\" (UID: \"073512fe-dbaf-4424-af25-3379588a1876\") " pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:27 crc 
kubenswrapper[4814]: I0122 06:41:27.371452 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbf5k\" (UniqueName: \"kubernetes.io/projected/073512fe-dbaf-4424-af25-3379588a1876-kube-api-access-qbf5k\") pod \"community-operators-hbzfk\" (UID: \"073512fe-dbaf-4424-af25-3379588a1876\") " pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:27 crc kubenswrapper[4814]: I0122 06:41:27.454573 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:28 crc kubenswrapper[4814]: I0122 06:41:28.084391 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hbzfk"] Jan 22 06:41:28 crc kubenswrapper[4814]: I0122 06:41:28.861937 4814 generic.go:334] "Generic (PLEG): container finished" podID="073512fe-dbaf-4424-af25-3379588a1876" containerID="b0fd81487d0d0a42fe57a9b602d676f5b18497f71fe738a199543787e312a4cf" exitCode=0 Jan 22 06:41:28 crc kubenswrapper[4814]: I0122 06:41:28.862033 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hbzfk" event={"ID":"073512fe-dbaf-4424-af25-3379588a1876","Type":"ContainerDied","Data":"b0fd81487d0d0a42fe57a9b602d676f5b18497f71fe738a199543787e312a4cf"} Jan 22 06:41:28 crc kubenswrapper[4814]: I0122 06:41:28.862761 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hbzfk" event={"ID":"073512fe-dbaf-4424-af25-3379588a1876","Type":"ContainerStarted","Data":"279e9a35e42a16d8c0de8d82c2371af4f6438355f7deff44049b31661b42827c"} Jan 22 06:41:28 crc kubenswrapper[4814]: I0122 06:41:28.863840 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 06:41:29 crc kubenswrapper[4814]: I0122 06:41:29.878314 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hbzfk" event={"ID":"073512fe-dbaf-4424-af25-3379588a1876","Type":"ContainerStarted","Data":"a16d8c89b96a6a871dabc849c2559e687020ca56c12591280157cfefdd727161"} Jan 22 06:41:30 crc kubenswrapper[4814]: I0122 06:41:30.893246 4814 generic.go:334] "Generic (PLEG): container finished" podID="073512fe-dbaf-4424-af25-3379588a1876" containerID="a16d8c89b96a6a871dabc849c2559e687020ca56c12591280157cfefdd727161" exitCode=0 Jan 22 06:41:30 crc kubenswrapper[4814]: I0122 06:41:30.893384 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hbzfk" event={"ID":"073512fe-dbaf-4424-af25-3379588a1876","Type":"ContainerDied","Data":"a16d8c89b96a6a871dabc849c2559e687020ca56c12591280157cfefdd727161"} Jan 22 06:41:31 crc kubenswrapper[4814]: I0122 06:41:31.905842 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hbzfk" event={"ID":"073512fe-dbaf-4424-af25-3379588a1876","Type":"ContainerStarted","Data":"811587df8eb2a633cab97ce36c44a6b5fc36dd655813e06c20af53d6fe4677fa"} Jan 22 06:41:37 crc kubenswrapper[4814]: I0122 06:41:37.455716 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:37 crc kubenswrapper[4814]: I0122 06:41:37.456253 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:37 crc kubenswrapper[4814]: I0122 06:41:37.563291 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:37 crc kubenswrapper[4814]: I0122 06:41:37.593782 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hbzfk" podStartSLOduration=8.080921862 podStartE2EDuration="10.593762146s" podCreationTimestamp="2026-01-22 06:41:27 +0000 UTC" firstStartedPulling="2026-01-22 06:41:28.863617297 +0000 UTC m=+4974.947105512" lastFinishedPulling="2026-01-22 06:41:31.376457581 +0000 UTC m=+4977.459945796" observedRunningTime="2026-01-22 06:41:31.947430432 +0000 UTC m=+4978.030918647" watchObservedRunningTime="2026-01-22 06:41:37.593762146 +0000 UTC m=+4983.677250361" Jan 22 06:41:38 crc kubenswrapper[4814]: I0122 06:41:38.012911 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.121523 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hbzfk"] Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.122147 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hbzfk" podUID="073512fe-dbaf-4424-af25-3379588a1876" containerName="registry-server" containerID="cri-o://811587df8eb2a633cab97ce36c44a6b5fc36dd655813e06c20af53d6fe4677fa" gracePeriod=2 Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.633149 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.725068 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/073512fe-dbaf-4424-af25-3379588a1876-catalog-content\") pod \"073512fe-dbaf-4424-af25-3379588a1876\" (UID: \"073512fe-dbaf-4424-af25-3379588a1876\") " Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.725287 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbf5k\" (UniqueName: \"kubernetes.io/projected/073512fe-dbaf-4424-af25-3379588a1876-kube-api-access-qbf5k\") pod \"073512fe-dbaf-4424-af25-3379588a1876\" (UID: \"073512fe-dbaf-4424-af25-3379588a1876\") " Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.725402 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/073512fe-dbaf-4424-af25-3379588a1876-utilities\") pod \"073512fe-dbaf-4424-af25-3379588a1876\" (UID: \"073512fe-dbaf-4424-af25-3379588a1876\") " Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.726720 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/073512fe-dbaf-4424-af25-3379588a1876-utilities" (OuterVolumeSpecName: "utilities") pod "073512fe-dbaf-4424-af25-3379588a1876" (UID: "073512fe-dbaf-4424-af25-3379588a1876"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.748063 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/073512fe-dbaf-4424-af25-3379588a1876-kube-api-access-qbf5k" (OuterVolumeSpecName: "kube-api-access-qbf5k") pod "073512fe-dbaf-4424-af25-3379588a1876" (UID: "073512fe-dbaf-4424-af25-3379588a1876"). InnerVolumeSpecName "kube-api-access-qbf5k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.793334 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/073512fe-dbaf-4424-af25-3379588a1876-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "073512fe-dbaf-4424-af25-3379588a1876" (UID: "073512fe-dbaf-4424-af25-3379588a1876"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.827243 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbf5k\" (UniqueName: \"kubernetes.io/projected/073512fe-dbaf-4424-af25-3379588a1876-kube-api-access-qbf5k\") on node \"crc\" DevicePath \"\"" Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.827272 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/073512fe-dbaf-4424-af25-3379588a1876-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.827283 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/073512fe-dbaf-4424-af25-3379588a1876-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.983978 4814 generic.go:334] "Generic (PLEG): container finished" podID="073512fe-dbaf-4424-af25-3379588a1876" containerID="811587df8eb2a633cab97ce36c44a6b5fc36dd655813e06c20af53d6fe4677fa" exitCode=0 Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.984021 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hbzfk" event={"ID":"073512fe-dbaf-4424-af25-3379588a1876","Type":"ContainerDied","Data":"811587df8eb2a633cab97ce36c44a6b5fc36dd655813e06c20af53d6fe4677fa"} Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.984049 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hbzfk" event={"ID":"073512fe-dbaf-4424-af25-3379588a1876","Type":"ContainerDied","Data":"279e9a35e42a16d8c0de8d82c2371af4f6438355f7deff44049b31661b42827c"} Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.984067 4814 scope.go:117] "RemoveContainer" containerID="811587df8eb2a633cab97ce36c44a6b5fc36dd655813e06c20af53d6fe4677fa" Jan 22 06:41:40 crc kubenswrapper[4814]: I0122 06:41:40.984079 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hbzfk" Jan 22 06:41:41 crc kubenswrapper[4814]: I0122 06:41:41.017018 4814 scope.go:117] "RemoveContainer" containerID="a16d8c89b96a6a871dabc849c2559e687020ca56c12591280157cfefdd727161" Jan 22 06:41:41 crc kubenswrapper[4814]: I0122 06:41:41.042150 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hbzfk"] Jan 22 06:41:41 crc kubenswrapper[4814]: I0122 06:41:41.046422 4814 scope.go:117] "RemoveContainer" containerID="b0fd81487d0d0a42fe57a9b602d676f5b18497f71fe738a199543787e312a4cf" Jan 22 06:41:41 crc kubenswrapper[4814]: I0122 06:41:41.050268 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hbzfk"] Jan 22 06:41:41 crc kubenswrapper[4814]: I0122 06:41:41.088991 4814 scope.go:117] "RemoveContainer" containerID="811587df8eb2a633cab97ce36c44a6b5fc36dd655813e06c20af53d6fe4677fa" Jan 22 06:41:41 crc kubenswrapper[4814]: E0122 06:41:41.089519 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"811587df8eb2a633cab97ce36c44a6b5fc36dd655813e06c20af53d6fe4677fa\": container with ID starting with 811587df8eb2a633cab97ce36c44a6b5fc36dd655813e06c20af53d6fe4677fa not found: ID does not exist" containerID="811587df8eb2a633cab97ce36c44a6b5fc36dd655813e06c20af53d6fe4677fa" Jan 22 06:41:41 crc kubenswrapper[4814]: I0122 06:41:41.089594 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"811587df8eb2a633cab97ce36c44a6b5fc36dd655813e06c20af53d6fe4677fa"} err="failed to get container status \"811587df8eb2a633cab97ce36c44a6b5fc36dd655813e06c20af53d6fe4677fa\": rpc error: code = NotFound desc = could not find container \"811587df8eb2a633cab97ce36c44a6b5fc36dd655813e06c20af53d6fe4677fa\": container with ID starting with 811587df8eb2a633cab97ce36c44a6b5fc36dd655813e06c20af53d6fe4677fa not found: ID does not exist" Jan 22 06:41:41 crc kubenswrapper[4814]: I0122 06:41:41.089671 4814 scope.go:117] "RemoveContainer" containerID="a16d8c89b96a6a871dabc849c2559e687020ca56c12591280157cfefdd727161" Jan 22 06:41:41 crc kubenswrapper[4814]: E0122 06:41:41.090126 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a16d8c89b96a6a871dabc849c2559e687020ca56c12591280157cfefdd727161\": container with ID starting with a16d8c89b96a6a871dabc849c2559e687020ca56c12591280157cfefdd727161 not found: ID does not exist" containerID="a16d8c89b96a6a871dabc849c2559e687020ca56c12591280157cfefdd727161" Jan 22 06:41:41 crc kubenswrapper[4814]: I0122 06:41:41.090198 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a16d8c89b96a6a871dabc849c2559e687020ca56c12591280157cfefdd727161"} err="failed to get container status \"a16d8c89b96a6a871dabc849c2559e687020ca56c12591280157cfefdd727161\": rpc error: code = NotFound desc = could not find container \"a16d8c89b96a6a871dabc849c2559e687020ca56c12591280157cfefdd727161\": container with ID starting with a16d8c89b96a6a871dabc849c2559e687020ca56c12591280157cfefdd727161 not found: ID does not exist" Jan 22 06:41:41 crc kubenswrapper[4814]: I0122 06:41:41.090226 4814 scope.go:117] "RemoveContainer" containerID="b0fd81487d0d0a42fe57a9b602d676f5b18497f71fe738a199543787e312a4cf" Jan 22 06:41:41 crc kubenswrapper[4814]: E0122 06:41:41.090598 4814 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"b0fd81487d0d0a42fe57a9b602d676f5b18497f71fe738a199543787e312a4cf\": container with ID starting with b0fd81487d0d0a42fe57a9b602d676f5b18497f71fe738a199543787e312a4cf not found: ID does not exist" containerID="b0fd81487d0d0a42fe57a9b602d676f5b18497f71fe738a199543787e312a4cf" Jan 22 06:41:41 crc kubenswrapper[4814]: I0122 06:41:41.090676 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0fd81487d0d0a42fe57a9b602d676f5b18497f71fe738a199543787e312a4cf"} err="failed to get container status \"b0fd81487d0d0a42fe57a9b602d676f5b18497f71fe738a199543787e312a4cf\": rpc error: code = NotFound desc = could not find container \"b0fd81487d0d0a42fe57a9b602d676f5b18497f71fe738a199543787e312a4cf\": container with ID starting with b0fd81487d0d0a42fe57a9b602d676f5b18497f71fe738a199543787e312a4cf not found: ID does not exist" Jan 22 06:41:42 crc kubenswrapper[4814]: I0122 06:41:42.360559 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="073512fe-dbaf-4424-af25-3379588a1876" path="/var/lib/kubelet/pods/073512fe-dbaf-4424-af25-3379588a1876/volumes" Jan 22 06:41:49 crc kubenswrapper[4814]: I0122 06:41:49.614395 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:41:49 crc kubenswrapper[4814]: I0122 06:41:49.615030 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:42:19 crc kubenswrapper[4814]: I0122 06:42:19.614345 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:42:19 crc kubenswrapper[4814]: I0122 06:42:19.615078 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:42:19 crc kubenswrapper[4814]: I0122 06:42:19.615145 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 06:42:19 crc kubenswrapper[4814]: I0122 06:42:19.615926 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:42:19 crc kubenswrapper[4814]: I0122 06:42:19.615999 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" 
podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" gracePeriod=600 Jan 22 06:42:20 crc kubenswrapper[4814]: E0122 06:42:20.318235 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:42:20 crc kubenswrapper[4814]: I0122 06:42:20.413789 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" exitCode=0 Jan 22 06:42:20 crc kubenswrapper[4814]: I0122 06:42:20.413830 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68"} Jan 22 06:42:20 crc kubenswrapper[4814]: I0122 06:42:20.413896 4814 scope.go:117] "RemoveContainer" containerID="eb9b0c2de3b38d3ab0e0c16bb8149d436c95e80cf9967278219839ea7c9d10aa" Jan 22 06:42:20 crc kubenswrapper[4814]: I0122 06:42:20.414576 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:42:20 crc kubenswrapper[4814]: E0122 06:42:20.414832 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:42:33 crc kubenswrapper[4814]: I0122 06:42:33.344617 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:42:33 crc kubenswrapper[4814]: E0122 06:42:33.345528 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:42:46 crc kubenswrapper[4814]: I0122 06:42:46.344479 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:42:46 crc kubenswrapper[4814]: E0122 06:42:46.345147 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:43:01 crc kubenswrapper[4814]: I0122 06:43:01.344316 4814 scope.go:117] 
"RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:43:01 crc kubenswrapper[4814]: E0122 06:43:01.345376 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:43:13 crc kubenswrapper[4814]: I0122 06:43:13.343778 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:43:13 crc kubenswrapper[4814]: E0122 06:43:13.344448 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:43:26 crc kubenswrapper[4814]: I0122 06:43:26.345222 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:43:26 crc kubenswrapper[4814]: E0122 06:43:26.346496 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:43:37 crc kubenswrapper[4814]: I0122 06:43:37.343839 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:43:37 crc kubenswrapper[4814]: E0122 06:43:37.344786 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:43:51 crc kubenswrapper[4814]: I0122 06:43:51.344366 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:43:51 crc kubenswrapper[4814]: E0122 06:43:51.345318 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:44:02 crc kubenswrapper[4814]: I0122 06:44:02.343917 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:44:02 crc kubenswrapper[4814]: E0122 06:44:02.344670 4814 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:44:14 crc kubenswrapper[4814]: I0122 06:44:14.351138 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:44:14 crc kubenswrapper[4814]: E0122 06:44:14.353238 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:44:25 crc kubenswrapper[4814]: I0122 06:44:25.344108 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:44:25 crc kubenswrapper[4814]: E0122 06:44:25.345866 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:44:36 crc kubenswrapper[4814]: I0122 06:44:36.343523 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:44:36 crc kubenswrapper[4814]: E0122 06:44:36.344540 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:44:47 crc kubenswrapper[4814]: I0122 06:44:47.344346 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:44:47 crc kubenswrapper[4814]: E0122 06:44:47.345202 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:44:58 crc kubenswrapper[4814]: I0122 06:44:58.344250 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:44:58 crc kubenswrapper[4814]: E0122 06:44:58.344933 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.160859 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2"] Jan 22 06:45:00 crc kubenswrapper[4814]: E0122 06:45:00.161837 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="073512fe-dbaf-4424-af25-3379588a1876" containerName="extract-utilities" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.161853 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="073512fe-dbaf-4424-af25-3379588a1876" containerName="extract-utilities" Jan 22 06:45:00 crc kubenswrapper[4814]: E0122 06:45:00.161869 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="073512fe-dbaf-4424-af25-3379588a1876" containerName="registry-server" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.161875 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="073512fe-dbaf-4424-af25-3379588a1876" containerName="registry-server" Jan 22 06:45:00 crc kubenswrapper[4814]: E0122 06:45:00.161889 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="073512fe-dbaf-4424-af25-3379588a1876" containerName="extract-content" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.161895 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="073512fe-dbaf-4424-af25-3379588a1876" containerName="extract-content" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.162066 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="073512fe-dbaf-4424-af25-3379588a1876" containerName="registry-server" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.162691 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.167609 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.168474 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.171704 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2"] Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.213089 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b4e379e-027a-4fe1-8348-ac3156947371-secret-volume\") pod \"collect-profiles-29484405-f5ns2\" (UID: \"2b4e379e-027a-4fe1-8348-ac3156947371\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.213160 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b4e379e-027a-4fe1-8348-ac3156947371-config-volume\") pod \"collect-profiles-29484405-f5ns2\" (UID: \"2b4e379e-027a-4fe1-8348-ac3156947371\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.213354 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wj6r8\" (UniqueName: \"kubernetes.io/projected/2b4e379e-027a-4fe1-8348-ac3156947371-kube-api-access-wj6r8\") pod \"collect-profiles-29484405-f5ns2\" (UID: \"2b4e379e-027a-4fe1-8348-ac3156947371\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.314863 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wj6r8\" (UniqueName: \"kubernetes.io/projected/2b4e379e-027a-4fe1-8348-ac3156947371-kube-api-access-wj6r8\") pod \"collect-profiles-29484405-f5ns2\" (UID: \"2b4e379e-027a-4fe1-8348-ac3156947371\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.314998 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b4e379e-027a-4fe1-8348-ac3156947371-secret-volume\") pod \"collect-profiles-29484405-f5ns2\" (UID: \"2b4e379e-027a-4fe1-8348-ac3156947371\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.315035 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b4e379e-027a-4fe1-8348-ac3156947371-config-volume\") pod \"collect-profiles-29484405-f5ns2\" (UID: \"2b4e379e-027a-4fe1-8348-ac3156947371\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.318413 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b4e379e-027a-4fe1-8348-ac3156947371-config-volume\") pod 
\"collect-profiles-29484405-f5ns2\" (UID: \"2b4e379e-027a-4fe1-8348-ac3156947371\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.320986 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b4e379e-027a-4fe1-8348-ac3156947371-secret-volume\") pod \"collect-profiles-29484405-f5ns2\" (UID: \"2b4e379e-027a-4fe1-8348-ac3156947371\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.388111 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wj6r8\" (UniqueName: \"kubernetes.io/projected/2b4e379e-027a-4fe1-8348-ac3156947371-kube-api-access-wj6r8\") pod \"collect-profiles-29484405-f5ns2\" (UID: \"2b4e379e-027a-4fe1-8348-ac3156947371\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.488127 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" Jan 22 06:45:00 crc kubenswrapper[4814]: I0122 06:45:00.993685 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2"] Jan 22 06:45:01 crc kubenswrapper[4814]: W0122 06:45:01.002229 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b4e379e_027a_4fe1_8348_ac3156947371.slice/crio-6f7ef9c153495f061a60ba214fde6dcd1b7f2a03088e553cf068e6657d7b152a WatchSource:0}: Error finding container 6f7ef9c153495f061a60ba214fde6dcd1b7f2a03088e553cf068e6657d7b152a: Status 404 returned error can't find the container with id 6f7ef9c153495f061a60ba214fde6dcd1b7f2a03088e553cf068e6657d7b152a Jan 22 06:45:01 crc kubenswrapper[4814]: I0122 06:45:01.979405 4814 generic.go:334] "Generic (PLEG): container finished" podID="2b4e379e-027a-4fe1-8348-ac3156947371" containerID="8e7e5d80dcb2f68179c4f5a02494417e949fb55b3963393d06bb6cc92a88fd3e" exitCode=0 Jan 22 06:45:01 crc kubenswrapper[4814]: I0122 06:45:01.979769 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" event={"ID":"2b4e379e-027a-4fe1-8348-ac3156947371","Type":"ContainerDied","Data":"8e7e5d80dcb2f68179c4f5a02494417e949fb55b3963393d06bb6cc92a88fd3e"} Jan 22 06:45:01 crc kubenswrapper[4814]: I0122 06:45:01.979832 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" event={"ID":"2b4e379e-027a-4fe1-8348-ac3156947371","Type":"ContainerStarted","Data":"6f7ef9c153495f061a60ba214fde6dcd1b7f2a03088e553cf068e6657d7b152a"} Jan 22 06:45:03 crc kubenswrapper[4814]: I0122 06:45:03.464410 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" Jan 22 06:45:03 crc kubenswrapper[4814]: I0122 06:45:03.569252 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b4e379e-027a-4fe1-8348-ac3156947371-config-volume\") pod \"2b4e379e-027a-4fe1-8348-ac3156947371\" (UID: \"2b4e379e-027a-4fe1-8348-ac3156947371\") " Jan 22 06:45:03 crc kubenswrapper[4814]: I0122 06:45:03.569400 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wj6r8\" (UniqueName: \"kubernetes.io/projected/2b4e379e-027a-4fe1-8348-ac3156947371-kube-api-access-wj6r8\") pod \"2b4e379e-027a-4fe1-8348-ac3156947371\" (UID: \"2b4e379e-027a-4fe1-8348-ac3156947371\") " Jan 22 06:45:03 crc kubenswrapper[4814]: I0122 06:45:03.569463 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b4e379e-027a-4fe1-8348-ac3156947371-secret-volume\") pod \"2b4e379e-027a-4fe1-8348-ac3156947371\" (UID: \"2b4e379e-027a-4fe1-8348-ac3156947371\") " Jan 22 06:45:03 crc kubenswrapper[4814]: I0122 06:45:03.570473 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b4e379e-027a-4fe1-8348-ac3156947371-config-volume" (OuterVolumeSpecName: "config-volume") pod "2b4e379e-027a-4fe1-8348-ac3156947371" (UID: "2b4e379e-027a-4fe1-8348-ac3156947371"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 06:45:03 crc kubenswrapper[4814]: I0122 06:45:03.575893 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b4e379e-027a-4fe1-8348-ac3156947371-kube-api-access-wj6r8" (OuterVolumeSpecName: "kube-api-access-wj6r8") pod "2b4e379e-027a-4fe1-8348-ac3156947371" (UID: "2b4e379e-027a-4fe1-8348-ac3156947371"). InnerVolumeSpecName "kube-api-access-wj6r8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:45:03 crc kubenswrapper[4814]: I0122 06:45:03.575907 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b4e379e-027a-4fe1-8348-ac3156947371-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2b4e379e-027a-4fe1-8348-ac3156947371" (UID: "2b4e379e-027a-4fe1-8348-ac3156947371"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:45:03 crc kubenswrapper[4814]: I0122 06:45:03.672515 4814 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b4e379e-027a-4fe1-8348-ac3156947371-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 06:45:03 crc kubenswrapper[4814]: I0122 06:45:03.672563 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wj6r8\" (UniqueName: \"kubernetes.io/projected/2b4e379e-027a-4fe1-8348-ac3156947371-kube-api-access-wj6r8\") on node \"crc\" DevicePath \"\"" Jan 22 06:45:03 crc kubenswrapper[4814]: I0122 06:45:03.672578 4814 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2b4e379e-027a-4fe1-8348-ac3156947371-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 06:45:03 crc kubenswrapper[4814]: I0122 06:45:03.999066 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" event={"ID":"2b4e379e-027a-4fe1-8348-ac3156947371","Type":"ContainerDied","Data":"6f7ef9c153495f061a60ba214fde6dcd1b7f2a03088e553cf068e6657d7b152a"} Jan 22 06:45:03 crc kubenswrapper[4814]: I0122 06:45:03.999134 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f7ef9c153495f061a60ba214fde6dcd1b7f2a03088e553cf068e6657d7b152a" Jan 22 06:45:03 crc kubenswrapper[4814]: I0122 06:45:03.999907 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484405-f5ns2" Jan 22 06:45:04 crc kubenswrapper[4814]: I0122 06:45:04.576254 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9"] Jan 22 06:45:04 crc kubenswrapper[4814]: I0122 06:45:04.594869 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484360-w7nh9"] Jan 22 06:45:06 crc kubenswrapper[4814]: I0122 06:45:06.365160 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="087abff4-52e5-485f-aa9a-f4d3d607f233" path="/var/lib/kubelet/pods/087abff4-52e5-485f-aa9a-f4d3d607f233/volumes" Jan 22 06:45:12 crc kubenswrapper[4814]: I0122 06:45:12.345224 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:45:12 crc kubenswrapper[4814]: E0122 06:45:12.346094 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:45:27 crc kubenswrapper[4814]: I0122 06:45:27.343637 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:45:27 crc kubenswrapper[4814]: E0122 06:45:27.344404 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:45:37 crc kubenswrapper[4814]: I0122 06:45:37.621847 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-5xkqn" podUID="b6998da9-cc02-4da8-b3d4-c02f32318b6f" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 22 06:45:38 crc kubenswrapper[4814]: I0122 06:45:38.343320 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:45:38 crc kubenswrapper[4814]: E0122 06:45:38.343928 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:45:44 crc kubenswrapper[4814]: I0122 06:45:44.992802 4814 scope.go:117] "RemoveContainer" containerID="e7e456a33ad766e2c9f7fd57ba866420bc95989fa475d5ed7642b659a39e5e56" Jan 22 06:45:45 crc kubenswrapper[4814]: I0122 06:45:45.803028 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bkczq"] Jan 22 06:45:45 crc kubenswrapper[4814]: E0122 06:45:45.806528 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b4e379e-027a-4fe1-8348-ac3156947371" containerName="collect-profiles" Jan 22 06:45:45 crc kubenswrapper[4814]: I0122 06:45:45.813063 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b4e379e-027a-4fe1-8348-ac3156947371" containerName="collect-profiles" Jan 22 06:45:45 crc kubenswrapper[4814]: I0122 06:45:45.813953 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b4e379e-027a-4fe1-8348-ac3156947371" containerName="collect-profiles" Jan 22 06:45:45 crc kubenswrapper[4814]: I0122 06:45:45.817378 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:45:45 crc kubenswrapper[4814]: I0122 06:45:45.843750 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bkczq"] Jan 22 06:45:45 crc kubenswrapper[4814]: I0122 06:45:45.930339 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a25d2adc-89cf-422c-bd1f-12827822dbc4-catalog-content\") pod \"redhat-operators-bkczq\" (UID: \"a25d2adc-89cf-422c-bd1f-12827822dbc4\") " pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:45:45 crc kubenswrapper[4814]: I0122 06:45:45.930412 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5tmp\" (UniqueName: \"kubernetes.io/projected/a25d2adc-89cf-422c-bd1f-12827822dbc4-kube-api-access-r5tmp\") pod \"redhat-operators-bkczq\" (UID: \"a25d2adc-89cf-422c-bd1f-12827822dbc4\") " pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:45:45 crc kubenswrapper[4814]: I0122 06:45:45.930651 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a25d2adc-89cf-422c-bd1f-12827822dbc4-utilities\") pod \"redhat-operators-bkczq\" (UID: \"a25d2adc-89cf-422c-bd1f-12827822dbc4\") " pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:45:46 crc kubenswrapper[4814]: I0122 06:45:46.031952 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a25d2adc-89cf-422c-bd1f-12827822dbc4-utilities\") pod \"redhat-operators-bkczq\" (UID: \"a25d2adc-89cf-422c-bd1f-12827822dbc4\") " pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:45:46 crc kubenswrapper[4814]: I0122 06:45:46.032089 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a25d2adc-89cf-422c-bd1f-12827822dbc4-catalog-content\") pod \"redhat-operators-bkczq\" (UID: \"a25d2adc-89cf-422c-bd1f-12827822dbc4\") " pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:45:46 crc kubenswrapper[4814]: I0122 06:45:46.032122 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5tmp\" (UniqueName: \"kubernetes.io/projected/a25d2adc-89cf-422c-bd1f-12827822dbc4-kube-api-access-r5tmp\") pod \"redhat-operators-bkczq\" (UID: \"a25d2adc-89cf-422c-bd1f-12827822dbc4\") " pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:45:46 crc kubenswrapper[4814]: I0122 06:45:46.032385 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a25d2adc-89cf-422c-bd1f-12827822dbc4-utilities\") pod \"redhat-operators-bkczq\" (UID: \"a25d2adc-89cf-422c-bd1f-12827822dbc4\") " pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:45:46 crc kubenswrapper[4814]: I0122 06:45:46.033072 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a25d2adc-89cf-422c-bd1f-12827822dbc4-catalog-content\") pod \"redhat-operators-bkczq\" (UID: \"a25d2adc-89cf-422c-bd1f-12827822dbc4\") " pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:45:46 crc kubenswrapper[4814]: I0122 06:45:46.061282 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-r5tmp\" (UniqueName: \"kubernetes.io/projected/a25d2adc-89cf-422c-bd1f-12827822dbc4-kube-api-access-r5tmp\") pod \"redhat-operators-bkczq\" (UID: \"a25d2adc-89cf-422c-bd1f-12827822dbc4\") " pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:45:46 crc kubenswrapper[4814]: I0122 06:45:46.167861 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:45:46 crc kubenswrapper[4814]: I0122 06:45:46.635313 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bkczq"] Jan 22 06:45:46 crc kubenswrapper[4814]: I0122 06:45:46.875333 4814 generic.go:334] "Generic (PLEG): container finished" podID="a25d2adc-89cf-422c-bd1f-12827822dbc4" containerID="e32842995b333f93c9acd2e6ac488692415a7813046245eb29af6091e6785bbb" exitCode=0 Jan 22 06:45:46 crc kubenswrapper[4814]: I0122 06:45:46.875388 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkczq" event={"ID":"a25d2adc-89cf-422c-bd1f-12827822dbc4","Type":"ContainerDied","Data":"e32842995b333f93c9acd2e6ac488692415a7813046245eb29af6091e6785bbb"} Jan 22 06:45:46 crc kubenswrapper[4814]: I0122 06:45:46.875420 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkczq" event={"ID":"a25d2adc-89cf-422c-bd1f-12827822dbc4","Type":"ContainerStarted","Data":"ab00f048eb5f6cb89f52ffc87802941b4130045fbe7cc4cf1772e13bb052b53d"} Jan 22 06:45:48 crc kubenswrapper[4814]: I0122 06:45:48.896696 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkczq" event={"ID":"a25d2adc-89cf-422c-bd1f-12827822dbc4","Type":"ContainerStarted","Data":"29ae133222e678353b0039e927f33917ea30922c9b35a82d641b1745cceb50c1"} Jan 22 06:45:51 crc kubenswrapper[4814]: I0122 06:45:51.933160 4814 generic.go:334] "Generic (PLEG): container finished" podID="a25d2adc-89cf-422c-bd1f-12827822dbc4" containerID="29ae133222e678353b0039e927f33917ea30922c9b35a82d641b1745cceb50c1" exitCode=0 Jan 22 06:45:51 crc kubenswrapper[4814]: I0122 06:45:51.933375 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkczq" event={"ID":"a25d2adc-89cf-422c-bd1f-12827822dbc4","Type":"ContainerDied","Data":"29ae133222e678353b0039e927f33917ea30922c9b35a82d641b1745cceb50c1"} Jan 22 06:45:52 crc kubenswrapper[4814]: I0122 06:45:52.343545 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:45:52 crc kubenswrapper[4814]: E0122 06:45:52.343862 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:45:52 crc kubenswrapper[4814]: I0122 06:45:52.945943 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkczq" event={"ID":"a25d2adc-89cf-422c-bd1f-12827822dbc4","Type":"ContainerStarted","Data":"e8e5f179e61b349d2ba73b8c20190868d3c15ee65b3b63899a476dd8fef38d15"} Jan 22 06:45:52 crc kubenswrapper[4814]: I0122 06:45:52.975812 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-bkczq" podStartSLOduration=2.497265724 podStartE2EDuration="7.975794453s" podCreationTimestamp="2026-01-22 06:45:45 +0000 UTC" firstStartedPulling="2026-01-22 06:45:46.880114007 +0000 UTC m=+5232.963602222" lastFinishedPulling="2026-01-22 06:45:52.358642736 +0000 UTC m=+5238.442130951" observedRunningTime="2026-01-22 06:45:52.970782277 +0000 UTC m=+5239.054270542" watchObservedRunningTime="2026-01-22 06:45:52.975794453 +0000 UTC m=+5239.059282668" Jan 22 06:45:56 crc kubenswrapper[4814]: I0122 06:45:56.168297 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:45:56 crc kubenswrapper[4814]: I0122 06:45:56.168830 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:45:57 crc kubenswrapper[4814]: I0122 06:45:57.218902 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bkczq" podUID="a25d2adc-89cf-422c-bd1f-12827822dbc4" containerName="registry-server" probeResult="failure" output=< Jan 22 06:45:57 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 06:45:57 crc kubenswrapper[4814]: > Jan 22 06:46:04 crc kubenswrapper[4814]: I0122 06:46:04.350006 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:46:04 crc kubenswrapper[4814]: E0122 06:46:04.350687 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:46:07 crc kubenswrapper[4814]: I0122 06:46:07.222338 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bkczq" podUID="a25d2adc-89cf-422c-bd1f-12827822dbc4" containerName="registry-server" probeResult="failure" output=< Jan 22 06:46:07 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 06:46:07 crc kubenswrapper[4814]: > Jan 22 06:46:16 crc kubenswrapper[4814]: I0122 06:46:16.233712 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:46:16 crc kubenswrapper[4814]: I0122 06:46:16.295350 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:46:16 crc kubenswrapper[4814]: I0122 06:46:16.345017 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:46:16 crc kubenswrapper[4814]: E0122 06:46:16.345315 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:46:16 crc kubenswrapper[4814]: I0122 06:46:16.984871 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-bkczq"] Jan 22 06:46:18 crc kubenswrapper[4814]: I0122 06:46:18.175850 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bkczq" podUID="a25d2adc-89cf-422c-bd1f-12827822dbc4" containerName="registry-server" containerID="cri-o://e8e5f179e61b349d2ba73b8c20190868d3c15ee65b3b63899a476dd8fef38d15" gracePeriod=2 Jan 22 06:46:18 crc kubenswrapper[4814]: I0122 06:46:18.774853 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:46:18 crc kubenswrapper[4814]: I0122 06:46:18.964255 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a25d2adc-89cf-422c-bd1f-12827822dbc4-catalog-content\") pod \"a25d2adc-89cf-422c-bd1f-12827822dbc4\" (UID: \"a25d2adc-89cf-422c-bd1f-12827822dbc4\") " Jan 22 06:46:18 crc kubenswrapper[4814]: I0122 06:46:18.964359 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a25d2adc-89cf-422c-bd1f-12827822dbc4-utilities\") pod \"a25d2adc-89cf-422c-bd1f-12827822dbc4\" (UID: \"a25d2adc-89cf-422c-bd1f-12827822dbc4\") " Jan 22 06:46:18 crc kubenswrapper[4814]: I0122 06:46:18.964557 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5tmp\" (UniqueName: \"kubernetes.io/projected/a25d2adc-89cf-422c-bd1f-12827822dbc4-kube-api-access-r5tmp\") pod \"a25d2adc-89cf-422c-bd1f-12827822dbc4\" (UID: \"a25d2adc-89cf-422c-bd1f-12827822dbc4\") " Jan 22 06:46:18 crc kubenswrapper[4814]: I0122 06:46:18.966237 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a25d2adc-89cf-422c-bd1f-12827822dbc4-utilities" (OuterVolumeSpecName: "utilities") pod "a25d2adc-89cf-422c-bd1f-12827822dbc4" (UID: "a25d2adc-89cf-422c-bd1f-12827822dbc4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:46:18 crc kubenswrapper[4814]: I0122 06:46:18.976674 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a25d2adc-89cf-422c-bd1f-12827822dbc4-kube-api-access-r5tmp" (OuterVolumeSpecName: "kube-api-access-r5tmp") pod "a25d2adc-89cf-422c-bd1f-12827822dbc4" (UID: "a25d2adc-89cf-422c-bd1f-12827822dbc4"). InnerVolumeSpecName "kube-api-access-r5tmp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.067228 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a25d2adc-89cf-422c-bd1f-12827822dbc4-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.067309 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5tmp\" (UniqueName: \"kubernetes.io/projected/a25d2adc-89cf-422c-bd1f-12827822dbc4-kube-api-access-r5tmp\") on node \"crc\" DevicePath \"\"" Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.074952 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a25d2adc-89cf-422c-bd1f-12827822dbc4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a25d2adc-89cf-422c-bd1f-12827822dbc4" (UID: "a25d2adc-89cf-422c-bd1f-12827822dbc4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.169422 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a25d2adc-89cf-422c-bd1f-12827822dbc4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.184083 4814 generic.go:334] "Generic (PLEG): container finished" podID="a25d2adc-89cf-422c-bd1f-12827822dbc4" containerID="e8e5f179e61b349d2ba73b8c20190868d3c15ee65b3b63899a476dd8fef38d15" exitCode=0 Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.184129 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bkczq" Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.184139 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkczq" event={"ID":"a25d2adc-89cf-422c-bd1f-12827822dbc4","Type":"ContainerDied","Data":"e8e5f179e61b349d2ba73b8c20190868d3c15ee65b3b63899a476dd8fef38d15"} Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.184226 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bkczq" event={"ID":"a25d2adc-89cf-422c-bd1f-12827822dbc4","Type":"ContainerDied","Data":"ab00f048eb5f6cb89f52ffc87802941b4130045fbe7cc4cf1772e13bb052b53d"} Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.184250 4814 scope.go:117] "RemoveContainer" containerID="e8e5f179e61b349d2ba73b8c20190868d3c15ee65b3b63899a476dd8fef38d15" Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.211433 4814 scope.go:117] "RemoveContainer" containerID="29ae133222e678353b0039e927f33917ea30922c9b35a82d641b1745cceb50c1" Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.245776 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bkczq"] Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.246085 4814 scope.go:117] "RemoveContainer" containerID="e32842995b333f93c9acd2e6ac488692415a7813046245eb29af6091e6785bbb" Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.248724 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bkczq"] Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.299201 4814 scope.go:117] "RemoveContainer" containerID="e8e5f179e61b349d2ba73b8c20190868d3c15ee65b3b63899a476dd8fef38d15" Jan 22 06:46:19 crc kubenswrapper[4814]: E0122 06:46:19.302721 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8e5f179e61b349d2ba73b8c20190868d3c15ee65b3b63899a476dd8fef38d15\": container with ID starting with e8e5f179e61b349d2ba73b8c20190868d3c15ee65b3b63899a476dd8fef38d15 not found: ID does not exist" containerID="e8e5f179e61b349d2ba73b8c20190868d3c15ee65b3b63899a476dd8fef38d15" Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.302748 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8e5f179e61b349d2ba73b8c20190868d3c15ee65b3b63899a476dd8fef38d15"} err="failed to get container status \"e8e5f179e61b349d2ba73b8c20190868d3c15ee65b3b63899a476dd8fef38d15\": rpc error: code = NotFound desc = could not find container \"e8e5f179e61b349d2ba73b8c20190868d3c15ee65b3b63899a476dd8fef38d15\": container with ID starting with e8e5f179e61b349d2ba73b8c20190868d3c15ee65b3b63899a476dd8fef38d15 not found: ID does not exist" Jan 22 06:46:19 crc 
kubenswrapper[4814]: I0122 06:46:19.302770 4814 scope.go:117] "RemoveContainer" containerID="29ae133222e678353b0039e927f33917ea30922c9b35a82d641b1745cceb50c1" Jan 22 06:46:19 crc kubenswrapper[4814]: E0122 06:46:19.303756 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29ae133222e678353b0039e927f33917ea30922c9b35a82d641b1745cceb50c1\": container with ID starting with 29ae133222e678353b0039e927f33917ea30922c9b35a82d641b1745cceb50c1 not found: ID does not exist" containerID="29ae133222e678353b0039e927f33917ea30922c9b35a82d641b1745cceb50c1" Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.303780 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29ae133222e678353b0039e927f33917ea30922c9b35a82d641b1745cceb50c1"} err="failed to get container status \"29ae133222e678353b0039e927f33917ea30922c9b35a82d641b1745cceb50c1\": rpc error: code = NotFound desc = could not find container \"29ae133222e678353b0039e927f33917ea30922c9b35a82d641b1745cceb50c1\": container with ID starting with 29ae133222e678353b0039e927f33917ea30922c9b35a82d641b1745cceb50c1 not found: ID does not exist" Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.303795 4814 scope.go:117] "RemoveContainer" containerID="e32842995b333f93c9acd2e6ac488692415a7813046245eb29af6091e6785bbb" Jan 22 06:46:19 crc kubenswrapper[4814]: E0122 06:46:19.304105 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e32842995b333f93c9acd2e6ac488692415a7813046245eb29af6091e6785bbb\": container with ID starting with e32842995b333f93c9acd2e6ac488692415a7813046245eb29af6091e6785bbb not found: ID does not exist" containerID="e32842995b333f93c9acd2e6ac488692415a7813046245eb29af6091e6785bbb" Jan 22 06:46:19 crc kubenswrapper[4814]: I0122 06:46:19.304132 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e32842995b333f93c9acd2e6ac488692415a7813046245eb29af6091e6785bbb"} err="failed to get container status \"e32842995b333f93c9acd2e6ac488692415a7813046245eb29af6091e6785bbb\": rpc error: code = NotFound desc = could not find container \"e32842995b333f93c9acd2e6ac488692415a7813046245eb29af6091e6785bbb\": container with ID starting with e32842995b333f93c9acd2e6ac488692415a7813046245eb29af6091e6785bbb not found: ID does not exist" Jan 22 06:46:20 crc kubenswrapper[4814]: I0122 06:46:20.353444 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a25d2adc-89cf-422c-bd1f-12827822dbc4" path="/var/lib/kubelet/pods/a25d2adc-89cf-422c-bd1f-12827822dbc4/volumes" Jan 22 06:46:29 crc kubenswrapper[4814]: I0122 06:46:29.344203 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:46:29 crc kubenswrapper[4814]: E0122 06:46:29.344846 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:46:43 crc kubenswrapper[4814]: I0122 06:46:43.343853 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" 
Jan 22 06:46:43 crc kubenswrapper[4814]: E0122 06:46:43.344782 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:46:58 crc kubenswrapper[4814]: I0122 06:46:58.344249 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:46:58 crc kubenswrapper[4814]: E0122 06:46:58.344994 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:47:09 crc kubenswrapper[4814]: I0122 06:47:09.344302 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:47:09 crc kubenswrapper[4814]: E0122 06:47:09.345240 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:47:24 crc kubenswrapper[4814]: I0122 06:47:24.349987 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:47:24 crc kubenswrapper[4814]: I0122 06:47:24.787454 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"3f6da396d28cc36ca41f3c66177dc4d0cb189de8276b44964aa3cabea76bd43f"} Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.711925 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5v9mf"] Jan 22 06:47:39 crc kubenswrapper[4814]: E0122 06:47:39.712755 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a25d2adc-89cf-422c-bd1f-12827822dbc4" containerName="extract-utilities" Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.712768 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a25d2adc-89cf-422c-bd1f-12827822dbc4" containerName="extract-utilities" Jan 22 06:47:39 crc kubenswrapper[4814]: E0122 06:47:39.712786 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a25d2adc-89cf-422c-bd1f-12827822dbc4" containerName="registry-server" Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.712792 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a25d2adc-89cf-422c-bd1f-12827822dbc4" containerName="registry-server" Jan 22 06:47:39 crc kubenswrapper[4814]: E0122 06:47:39.712817 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a25d2adc-89cf-422c-bd1f-12827822dbc4" containerName="extract-content" Jan 22 06:47:39 crc 
kubenswrapper[4814]: I0122 06:47:39.712823 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a25d2adc-89cf-422c-bd1f-12827822dbc4" containerName="extract-content" Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.713025 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a25d2adc-89cf-422c-bd1f-12827822dbc4" containerName="registry-server" Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.714271 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.743210 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5v9mf"] Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.764539 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6e49-4d95-4086-b6f7-952209d7239f-utilities\") pod \"redhat-marketplace-5v9mf\" (UID: \"9bcd6e49-4d95-4086-b6f7-952209d7239f\") " pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.764593 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6e49-4d95-4086-b6f7-952209d7239f-catalog-content\") pod \"redhat-marketplace-5v9mf\" (UID: \"9bcd6e49-4d95-4086-b6f7-952209d7239f\") " pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.764799 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4gnl\" (UniqueName: \"kubernetes.io/projected/9bcd6e49-4d95-4086-b6f7-952209d7239f-kube-api-access-x4gnl\") pod \"redhat-marketplace-5v9mf\" (UID: \"9bcd6e49-4d95-4086-b6f7-952209d7239f\") " pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.867139 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6e49-4d95-4086-b6f7-952209d7239f-utilities\") pod \"redhat-marketplace-5v9mf\" (UID: \"9bcd6e49-4d95-4086-b6f7-952209d7239f\") " pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.867180 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6e49-4d95-4086-b6f7-952209d7239f-catalog-content\") pod \"redhat-marketplace-5v9mf\" (UID: \"9bcd6e49-4d95-4086-b6f7-952209d7239f\") " pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.867217 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4gnl\" (UniqueName: \"kubernetes.io/projected/9bcd6e49-4d95-4086-b6f7-952209d7239f-kube-api-access-x4gnl\") pod \"redhat-marketplace-5v9mf\" (UID: \"9bcd6e49-4d95-4086-b6f7-952209d7239f\") " pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.868263 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6e49-4d95-4086-b6f7-952209d7239f-utilities\") pod \"redhat-marketplace-5v9mf\" (UID: \"9bcd6e49-4d95-4086-b6f7-952209d7239f\") " pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 
06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.868471 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6e49-4d95-4086-b6f7-952209d7239f-catalog-content\") pod \"redhat-marketplace-5v9mf\" (UID: \"9bcd6e49-4d95-4086-b6f7-952209d7239f\") " pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:39 crc kubenswrapper[4814]: I0122 06:47:39.891317 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4gnl\" (UniqueName: \"kubernetes.io/projected/9bcd6e49-4d95-4086-b6f7-952209d7239f-kube-api-access-x4gnl\") pod \"redhat-marketplace-5v9mf\" (UID: \"9bcd6e49-4d95-4086-b6f7-952209d7239f\") " pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:40 crc kubenswrapper[4814]: I0122 06:47:40.041952 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:41 crc kubenswrapper[4814]: I0122 06:47:41.024300 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5v9mf"] Jan 22 06:47:41 crc kubenswrapper[4814]: I0122 06:47:41.056909 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5v9mf" event={"ID":"9bcd6e49-4d95-4086-b6f7-952209d7239f","Type":"ContainerStarted","Data":"44400bb9a98045e84bf7569adb26fa69b451a5d91bf316f758a038ceeea179cf"} Jan 22 06:47:42 crc kubenswrapper[4814]: I0122 06:47:42.068817 4814 generic.go:334] "Generic (PLEG): container finished" podID="9bcd6e49-4d95-4086-b6f7-952209d7239f" containerID="ed596f46e056f9a8fd89bff1ed6198bcffaf94f3e4d9ffef1634b6a089d6422a" exitCode=0 Jan 22 06:47:42 crc kubenswrapper[4814]: I0122 06:47:42.068901 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5v9mf" event={"ID":"9bcd6e49-4d95-4086-b6f7-952209d7239f","Type":"ContainerDied","Data":"ed596f46e056f9a8fd89bff1ed6198bcffaf94f3e4d9ffef1634b6a089d6422a"} Jan 22 06:47:42 crc kubenswrapper[4814]: I0122 06:47:42.071572 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 06:47:43 crc kubenswrapper[4814]: I0122 06:47:43.080362 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5v9mf" event={"ID":"9bcd6e49-4d95-4086-b6f7-952209d7239f","Type":"ContainerStarted","Data":"99c4769ed1c59cf115f7a207bb961a4b5417b8de69eec2f22e92f3012bbd8479"} Jan 22 06:47:44 crc kubenswrapper[4814]: I0122 06:47:44.089450 4814 generic.go:334] "Generic (PLEG): container finished" podID="9bcd6e49-4d95-4086-b6f7-952209d7239f" containerID="99c4769ed1c59cf115f7a207bb961a4b5417b8de69eec2f22e92f3012bbd8479" exitCode=0 Jan 22 06:47:44 crc kubenswrapper[4814]: I0122 06:47:44.089503 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5v9mf" event={"ID":"9bcd6e49-4d95-4086-b6f7-952209d7239f","Type":"ContainerDied","Data":"99c4769ed1c59cf115f7a207bb961a4b5417b8de69eec2f22e92f3012bbd8479"} Jan 22 06:47:45 crc kubenswrapper[4814]: I0122 06:47:45.134281 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5v9mf" event={"ID":"9bcd6e49-4d95-4086-b6f7-952209d7239f","Type":"ContainerStarted","Data":"58d9b2ac4df9fcae4b7fa93155feb6e1688eecdfd5a6eaf0be42e93e9327b1ce"} Jan 22 06:47:45 crc kubenswrapper[4814]: I0122 06:47:45.161258 4814 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-marketplace/redhat-marketplace-5v9mf" podStartSLOduration=3.504149542 podStartE2EDuration="6.161239266s" podCreationTimestamp="2026-01-22 06:47:39 +0000 UTC" firstStartedPulling="2026-01-22 06:47:42.071215177 +0000 UTC m=+5348.154703412" lastFinishedPulling="2026-01-22 06:47:44.728304881 +0000 UTC m=+5350.811793136" observedRunningTime="2026-01-22 06:47:45.151476241 +0000 UTC m=+5351.234964456" watchObservedRunningTime="2026-01-22 06:47:45.161239266 +0000 UTC m=+5351.244727481" Jan 22 06:47:50 crc kubenswrapper[4814]: I0122 06:47:50.042988 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:50 crc kubenswrapper[4814]: I0122 06:47:50.043657 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:50 crc kubenswrapper[4814]: I0122 06:47:50.097550 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:50 crc kubenswrapper[4814]: I0122 06:47:50.218576 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:51 crc kubenswrapper[4814]: I0122 06:47:51.900453 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5v9mf"] Jan 22 06:47:52 crc kubenswrapper[4814]: I0122 06:47:52.189680 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5v9mf" podUID="9bcd6e49-4d95-4086-b6f7-952209d7239f" containerName="registry-server" containerID="cri-o://58d9b2ac4df9fcae4b7fa93155feb6e1688eecdfd5a6eaf0be42e93e9327b1ce" gracePeriod=2 Jan 22 06:47:52 crc kubenswrapper[4814]: I0122 06:47:52.785971 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:52 crc kubenswrapper[4814]: I0122 06:47:52.933357 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6e49-4d95-4086-b6f7-952209d7239f-catalog-content\") pod \"9bcd6e49-4d95-4086-b6f7-952209d7239f\" (UID: \"9bcd6e49-4d95-4086-b6f7-952209d7239f\") " Jan 22 06:47:52 crc kubenswrapper[4814]: I0122 06:47:52.933438 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6e49-4d95-4086-b6f7-952209d7239f-utilities\") pod \"9bcd6e49-4d95-4086-b6f7-952209d7239f\" (UID: \"9bcd6e49-4d95-4086-b6f7-952209d7239f\") " Jan 22 06:47:52 crc kubenswrapper[4814]: I0122 06:47:52.933577 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4gnl\" (UniqueName: \"kubernetes.io/projected/9bcd6e49-4d95-4086-b6f7-952209d7239f-kube-api-access-x4gnl\") pod \"9bcd6e49-4d95-4086-b6f7-952209d7239f\" (UID: \"9bcd6e49-4d95-4086-b6f7-952209d7239f\") " Jan 22 06:47:52 crc kubenswrapper[4814]: I0122 06:47:52.940774 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bcd6e49-4d95-4086-b6f7-952209d7239f-utilities" (OuterVolumeSpecName: "utilities") pod "9bcd6e49-4d95-4086-b6f7-952209d7239f" (UID: "9bcd6e49-4d95-4086-b6f7-952209d7239f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:47:52 crc kubenswrapper[4814]: I0122 06:47:52.944488 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bcd6e49-4d95-4086-b6f7-952209d7239f-kube-api-access-x4gnl" (OuterVolumeSpecName: "kube-api-access-x4gnl") pod "9bcd6e49-4d95-4086-b6f7-952209d7239f" (UID: "9bcd6e49-4d95-4086-b6f7-952209d7239f"). InnerVolumeSpecName "kube-api-access-x4gnl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:47:52 crc kubenswrapper[4814]: I0122 06:47:52.963190 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bcd6e49-4d95-4086-b6f7-952209d7239f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9bcd6e49-4d95-4086-b6f7-952209d7239f" (UID: "9bcd6e49-4d95-4086-b6f7-952209d7239f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.035653 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6e49-4d95-4086-b6f7-952209d7239f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.035684 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcd6e49-4d95-4086-b6f7-952209d7239f-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.035696 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4gnl\" (UniqueName: \"kubernetes.io/projected/9bcd6e49-4d95-4086-b6f7-952209d7239f-kube-api-access-x4gnl\") on node \"crc\" DevicePath \"\"" Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.199437 4814 generic.go:334] "Generic (PLEG): container finished" podID="9bcd6e49-4d95-4086-b6f7-952209d7239f" containerID="58d9b2ac4df9fcae4b7fa93155feb6e1688eecdfd5a6eaf0be42e93e9327b1ce" exitCode=0 Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.199491 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5v9mf" event={"ID":"9bcd6e49-4d95-4086-b6f7-952209d7239f","Type":"ContainerDied","Data":"58d9b2ac4df9fcae4b7fa93155feb6e1688eecdfd5a6eaf0be42e93e9327b1ce"} Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.199533 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5v9mf" event={"ID":"9bcd6e49-4d95-4086-b6f7-952209d7239f","Type":"ContainerDied","Data":"44400bb9a98045e84bf7569adb26fa69b451a5d91bf316f758a038ceeea179cf"} Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.199556 4814 scope.go:117] "RemoveContainer" containerID="58d9b2ac4df9fcae4b7fa93155feb6e1688eecdfd5a6eaf0be42e93e9327b1ce" Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.200095 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5v9mf" Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.239155 4814 scope.go:117] "RemoveContainer" containerID="99c4769ed1c59cf115f7a207bb961a4b5417b8de69eec2f22e92f3012bbd8479" Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.268892 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5v9mf"] Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.276636 4814 scope.go:117] "RemoveContainer" containerID="ed596f46e056f9a8fd89bff1ed6198bcffaf94f3e4d9ffef1634b6a089d6422a" Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.281209 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5v9mf"] Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.335177 4814 scope.go:117] "RemoveContainer" containerID="58d9b2ac4df9fcae4b7fa93155feb6e1688eecdfd5a6eaf0be42e93e9327b1ce" Jan 22 06:47:53 crc kubenswrapper[4814]: E0122 06:47:53.335804 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58d9b2ac4df9fcae4b7fa93155feb6e1688eecdfd5a6eaf0be42e93e9327b1ce\": container with ID starting with 58d9b2ac4df9fcae4b7fa93155feb6e1688eecdfd5a6eaf0be42e93e9327b1ce not found: ID does not exist" containerID="58d9b2ac4df9fcae4b7fa93155feb6e1688eecdfd5a6eaf0be42e93e9327b1ce" Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.335839 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58d9b2ac4df9fcae4b7fa93155feb6e1688eecdfd5a6eaf0be42e93e9327b1ce"} err="failed to get container status \"58d9b2ac4df9fcae4b7fa93155feb6e1688eecdfd5a6eaf0be42e93e9327b1ce\": rpc error: code = NotFound desc = could not find container \"58d9b2ac4df9fcae4b7fa93155feb6e1688eecdfd5a6eaf0be42e93e9327b1ce\": container with ID starting with 58d9b2ac4df9fcae4b7fa93155feb6e1688eecdfd5a6eaf0be42e93e9327b1ce not found: ID does not exist" Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.335858 4814 scope.go:117] "RemoveContainer" containerID="99c4769ed1c59cf115f7a207bb961a4b5417b8de69eec2f22e92f3012bbd8479" Jan 22 06:47:53 crc kubenswrapper[4814]: E0122 06:47:53.336290 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99c4769ed1c59cf115f7a207bb961a4b5417b8de69eec2f22e92f3012bbd8479\": container with ID starting with 99c4769ed1c59cf115f7a207bb961a4b5417b8de69eec2f22e92f3012bbd8479 not found: ID does not exist" containerID="99c4769ed1c59cf115f7a207bb961a4b5417b8de69eec2f22e92f3012bbd8479" Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.336314 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99c4769ed1c59cf115f7a207bb961a4b5417b8de69eec2f22e92f3012bbd8479"} err="failed to get container status \"99c4769ed1c59cf115f7a207bb961a4b5417b8de69eec2f22e92f3012bbd8479\": rpc error: code = NotFound desc = could not find container \"99c4769ed1c59cf115f7a207bb961a4b5417b8de69eec2f22e92f3012bbd8479\": container with ID starting with 99c4769ed1c59cf115f7a207bb961a4b5417b8de69eec2f22e92f3012bbd8479 not found: ID does not exist" Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.336328 4814 scope.go:117] "RemoveContainer" containerID="ed596f46e056f9a8fd89bff1ed6198bcffaf94f3e4d9ffef1634b6a089d6422a" Jan 22 06:47:53 crc kubenswrapper[4814]: E0122 06:47:53.336651 4814 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ed596f46e056f9a8fd89bff1ed6198bcffaf94f3e4d9ffef1634b6a089d6422a\": container with ID starting with ed596f46e056f9a8fd89bff1ed6198bcffaf94f3e4d9ffef1634b6a089d6422a not found: ID does not exist" containerID="ed596f46e056f9a8fd89bff1ed6198bcffaf94f3e4d9ffef1634b6a089d6422a" Jan 22 06:47:53 crc kubenswrapper[4814]: I0122 06:47:53.336675 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed596f46e056f9a8fd89bff1ed6198bcffaf94f3e4d9ffef1634b6a089d6422a"} err="failed to get container status \"ed596f46e056f9a8fd89bff1ed6198bcffaf94f3e4d9ffef1634b6a089d6422a\": rpc error: code = NotFound desc = could not find container \"ed596f46e056f9a8fd89bff1ed6198bcffaf94f3e4d9ffef1634b6a089d6422a\": container with ID starting with ed596f46e056f9a8fd89bff1ed6198bcffaf94f3e4d9ffef1634b6a089d6422a not found: ID does not exist" Jan 22 06:47:54 crc kubenswrapper[4814]: I0122 06:47:54.357240 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bcd6e49-4d95-4086-b6f7-952209d7239f" path="/var/lib/kubelet/pods/9bcd6e49-4d95-4086-b6f7-952209d7239f/volumes" Jan 22 06:48:51 crc kubenswrapper[4814]: E0122 06:48:51.578786 4814 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.110:38046->38.102.83.110:39385: write tcp 38.102.83.110:38046->38.102.83.110:39385: write: broken pipe Jan 22 06:49:49 crc kubenswrapper[4814]: I0122 06:49:49.614780 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:49:49 crc kubenswrapper[4814]: I0122 06:49:49.617230 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.386295 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5c7779bc69-m8twp"] Jan 22 06:50:08 crc kubenswrapper[4814]: E0122 06:50:08.387171 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bcd6e49-4d95-4086-b6f7-952209d7239f" containerName="extract-utilities" Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.387185 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bcd6e49-4d95-4086-b6f7-952209d7239f" containerName="extract-utilities" Jan 22 06:50:08 crc kubenswrapper[4814]: E0122 06:50:08.387201 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bcd6e49-4d95-4086-b6f7-952209d7239f" containerName="registry-server" Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.387207 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bcd6e49-4d95-4086-b6f7-952209d7239f" containerName="registry-server" Jan 22 06:50:08 crc kubenswrapper[4814]: E0122 06:50:08.387235 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bcd6e49-4d95-4086-b6f7-952209d7239f" containerName="extract-content" Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.387241 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bcd6e49-4d95-4086-b6f7-952209d7239f" containerName="extract-content" Jan 
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.388494 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.465040 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c7779bc69-m8twp"]
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.489892 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-combined-ca-bundle\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.490038 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-httpd-config\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.490057 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-internal-tls-certs\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.490073 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-config\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.490120 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svw2h\" (UniqueName: \"kubernetes.io/projected/e59b9bca-da4b-44d9-8d36-22933672f479-kube-api-access-svw2h\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.490162 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-ovndb-tls-certs\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.490183 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-public-tls-certs\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.591811 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-httpd-config\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.591863 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-internal-tls-certs\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.591898 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-config\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.591960 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svw2h\" (UniqueName: \"kubernetes.io/projected/e59b9bca-da4b-44d9-8d36-22933672f479-kube-api-access-svw2h\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.592013 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-ovndb-tls-certs\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.592035 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-public-tls-certs\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.592052 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-combined-ca-bundle\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.599828 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-config\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.603441 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-ovndb-tls-certs\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.603488 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-httpd-config\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.603583 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-combined-ca-bundle\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.604031 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-internal-tls-certs\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.609391 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e59b9bca-da4b-44d9-8d36-22933672f479-public-tls-certs\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.620952 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svw2h\" (UniqueName: \"kubernetes.io/projected/e59b9bca-da4b-44d9-8d36-22933672f479-kube-api-access-svw2h\") pod \"neutron-5c7779bc69-m8twp\" (UID: \"e59b9bca-da4b-44d9-8d36-22933672f479\") " pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:08 crc kubenswrapper[4814]: I0122 06:50:08.704248 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:09 crc kubenswrapper[4814]: W0122 06:50:09.493268 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode59b9bca_da4b_44d9_8d36_22933672f479.slice/crio-4c2cee8d80ad5e85e2ebcd9af315bb3543a30e539eb65f080c16cabd3d5afc08 WatchSource:0}: Error finding container 4c2cee8d80ad5e85e2ebcd9af315bb3543a30e539eb65f080c16cabd3d5afc08: Status 404 returned error can't find the container with id 4c2cee8d80ad5e85e2ebcd9af315bb3543a30e539eb65f080c16cabd3d5afc08
Jan 22 06:50:09 crc kubenswrapper[4814]: I0122 06:50:09.505441 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c7779bc69-m8twp"]
Jan 22 06:50:09 crc kubenswrapper[4814]: I0122 06:50:09.639512 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c7779bc69-m8twp" event={"ID":"e59b9bca-da4b-44d9-8d36-22933672f479","Type":"ContainerStarted","Data":"4c2cee8d80ad5e85e2ebcd9af315bb3543a30e539eb65f080c16cabd3d5afc08"}
Jan 22 06:50:10 crc kubenswrapper[4814]: I0122 06:50:10.670849 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c7779bc69-m8twp" event={"ID":"e59b9bca-da4b-44d9-8d36-22933672f479","Type":"ContainerStarted","Data":"ea44d4920facd55aec67f8d9483c388f8ddf65713aa36cb4cd817bfa2bf8ffd5"}
Jan 22 06:50:10 crc kubenswrapper[4814]: I0122 06:50:10.671139 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c7779bc69-m8twp" event={"ID":"e59b9bca-da4b-44d9-8d36-22933672f479","Type":"ContainerStarted","Data":"c46d62984b4596d4069858e1bd340655c0892651ac442b5f5918678a82ad3a44"}
Jan 22 06:50:10 crc kubenswrapper[4814]: I0122 06:50:10.671185 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:10 crc kubenswrapper[4814]: I0122 06:50:10.706362 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5c7779bc69-m8twp" podStartSLOduration=2.706340957 podStartE2EDuration="2.706340957s" podCreationTimestamp="2026-01-22 06:50:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 06:50:10.695466768 +0000 UTC m=+5496.778954993" watchObservedRunningTime="2026-01-22 06:50:10.706340957 +0000 UTC m=+5496.789829162"
Jan 22 06:50:19 crc kubenswrapper[4814]: I0122 06:50:19.614412 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:50:19 crc kubenswrapper[4814]: I0122 06:50:19.614965 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:50:38 crc kubenswrapper[4814]: I0122 06:50:38.730908 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5c7779bc69-m8twp"
Jan 22 06:50:38 crc kubenswrapper[4814]: I0122 06:50:38.856589 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5cf7d7889-27mtt"]
Jan 22 06:50:38 crc kubenswrapper[4814]: I0122 06:50:38.857188 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5cf7d7889-27mtt" podUID="c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" containerName="neutron-api" containerID="cri-o://9370c2f5eb9fb90e56ce8dfc3b4ef685be64651243a04e857cc9d387515ebbfe" gracePeriod=30
Jan 22 06:50:38 crc kubenswrapper[4814]: I0122 06:50:38.857346 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5cf7d7889-27mtt" podUID="c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" containerName="neutron-httpd" containerID="cri-o://b5ab4f248565a32892ccfb1552a875f4e3e004a7648cf25d1f798f477759e22b" gracePeriod=30
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.535206 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2vrgl"]
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.539856 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2vrgl"
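
The machine-config-daemon entries above are an HTTP liveness probe being refused on 127.0.0.1:8798/health, with failures recurring 30 seconds apart. A client-go literal that would produce this exact check, assuming a recent k8s.io/api; the host, port, and path come from the log, while the timing fields are guesses consistent with the 30-second spacing:

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Reconstruction of the probe implied by the failure output
// "Get \"http://127.0.0.1:8798/health\": ... connection refused".
// PeriodSeconds and FailureThreshold are assumptions, not from the log.
var liveness = &corev1.Probe{
	ProbeHandler: corev1.ProbeHandler{
		HTTPGet: &corev1.HTTPGetAction{
			Host: "127.0.0.1",
			Path: "/health",
			Port: intstr.FromInt(8798),
		},
	},
	PeriodSeconds:    30,
	FailureThreshold: 3,
}

func main() { _ = liveness }
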
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.566755 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2vrgl"]
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.622494 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5233071f-1c58-43e7-b415-62edb5f81f49-catalog-content\") pod \"certified-operators-2vrgl\" (UID: \"5233071f-1c58-43e7-b415-62edb5f81f49\") " pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.622572 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5233071f-1c58-43e7-b415-62edb5f81f49-utilities\") pod \"certified-operators-2vrgl\" (UID: \"5233071f-1c58-43e7-b415-62edb5f81f49\") " pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.622591 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ppb6\" (UniqueName: \"kubernetes.io/projected/5233071f-1c58-43e7-b415-62edb5f81f49-kube-api-access-7ppb6\") pod \"certified-operators-2vrgl\" (UID: \"5233071f-1c58-43e7-b415-62edb5f81f49\") " pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.724245 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5233071f-1c58-43e7-b415-62edb5f81f49-catalog-content\") pod \"certified-operators-2vrgl\" (UID: \"5233071f-1c58-43e7-b415-62edb5f81f49\") " pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.724337 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ppb6\" (UniqueName: \"kubernetes.io/projected/5233071f-1c58-43e7-b415-62edb5f81f49-kube-api-access-7ppb6\") pod \"certified-operators-2vrgl\" (UID: \"5233071f-1c58-43e7-b415-62edb5f81f49\") " pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.724363 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5233071f-1c58-43e7-b415-62edb5f81f49-utilities\") pod \"certified-operators-2vrgl\" (UID: \"5233071f-1c58-43e7-b415-62edb5f81f49\") " pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.724741 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5233071f-1c58-43e7-b415-62edb5f81f49-catalog-content\") pod \"certified-operators-2vrgl\" (UID: \"5233071f-1c58-43e7-b415-62edb5f81f49\") " pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.724978 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5233071f-1c58-43e7-b415-62edb5f81f49-utilities\") pod \"certified-operators-2vrgl\" (UID: \"5233071f-1c58-43e7-b415-62edb5f81f49\") " pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.757490 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ppb6\" (UniqueName: \"kubernetes.io/projected/5233071f-1c58-43e7-b415-62edb5f81f49-kube-api-access-7ppb6\") pod \"certified-operators-2vrgl\" (UID: \"5233071f-1c58-43e7-b415-62edb5f81f49\") " pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.861914 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.987366 4814 generic.go:334] "Generic (PLEG): container finished" podID="c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" containerID="b5ab4f248565a32892ccfb1552a875f4e3e004a7648cf25d1f798f477759e22b" exitCode=0
Jan 22 06:50:39 crc kubenswrapper[4814]: I0122 06:50:39.987612 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5cf7d7889-27mtt" event={"ID":"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a","Type":"ContainerDied","Data":"b5ab4f248565a32892ccfb1552a875f4e3e004a7648cf25d1f798f477759e22b"}
Jan 22 06:50:40 crc kubenswrapper[4814]: I0122 06:50:40.482604 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2vrgl"]
Jan 22 06:50:40 crc kubenswrapper[4814]: I0122 06:50:40.998461 4814 generic.go:334] "Generic (PLEG): container finished" podID="5233071f-1c58-43e7-b415-62edb5f81f49" containerID="19e7b54eb4a644e5924afbc8c49c561759b8406e1cb373bc087b967f95261441" exitCode=0
Jan 22 06:50:40 crc kubenswrapper[4814]: I0122 06:50:40.998563 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vrgl" event={"ID":"5233071f-1c58-43e7-b415-62edb5f81f49","Type":"ContainerDied","Data":"19e7b54eb4a644e5924afbc8c49c561759b8406e1cb373bc087b967f95261441"}
Jan 22 06:50:40 crc kubenswrapper[4814]: I0122 06:50:40.998838 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vrgl" event={"ID":"5233071f-1c58-43e7-b415-62edb5f81f49","Type":"ContainerStarted","Data":"12fabfad1e92838db32db264918fb6cd1003416b7d0865fc269984a697408376"}
Jan 22 06:50:42 crc kubenswrapper[4814]: I0122 06:50:42.011827 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vrgl" event={"ID":"5233071f-1c58-43e7-b415-62edb5f81f49","Type":"ContainerStarted","Data":"0c708dc3b21bcd735915d2541e08ec18a9fd78d28d8c62876fe12d7886a0cfe1"}
Jan 22 06:50:43 crc kubenswrapper[4814]: I0122 06:50:43.020904 4814 generic.go:334] "Generic (PLEG): container finished" podID="5233071f-1c58-43e7-b415-62edb5f81f49" containerID="0c708dc3b21bcd735915d2541e08ec18a9fd78d28d8c62876fe12d7886a0cfe1" exitCode=0
Jan 22 06:50:43 crc kubenswrapper[4814]: I0122 06:50:43.020946 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vrgl" event={"ID":"5233071f-1c58-43e7-b415-62edb5f81f49","Type":"ContainerDied","Data":"0c708dc3b21bcd735915d2541e08ec18a9fd78d28d8c62876fe12d7886a0cfe1"}
Jan 22 06:50:44 crc kubenswrapper[4814]: I0122 06:50:44.033086 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vrgl" event={"ID":"5233071f-1c58-43e7-b415-62edb5f81f49","Type":"ContainerStarted","Data":"dac5939647ef0e7b151903c9eeeaad61a6ccb3620d91f8208a232903a4353e90"}
Jan 22 06:50:44 crc kubenswrapper[4814]: I0122 06:50:44.063793 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2vrgl" podStartSLOduration=2.653477758 podStartE2EDuration="5.063775812s" podCreationTimestamp="2026-01-22 06:50:39 +0000 UTC" firstStartedPulling="2026-01-22 06:50:41.001006309 +0000 UTC m=+5527.084494514" lastFinishedPulling="2026-01-22 06:50:43.411304353 +0000 UTC m=+5529.494792568" observedRunningTime="2026-01-22 06:50:44.05887869 +0000 UTC m=+5530.142366905" watchObservedRunningTime="2026-01-22 06:50:44.063775812 +0000 UTC m=+5530.147264027"
Jan 22 06:50:49 crc kubenswrapper[4814]: I0122 06:50:49.614652 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 22 06:50:49 crc kubenswrapper[4814]: I0122 06:50:49.615238 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 22 06:50:49 crc kubenswrapper[4814]: I0122 06:50:49.615319 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg"
Jan 22 06:50:49 crc kubenswrapper[4814]: I0122 06:50:49.616795 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3f6da396d28cc36ca41f3c66177dc4d0cb189de8276b44964aa3cabea76bd43f"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 22 06:50:49 crc kubenswrapper[4814]: I0122 06:50:49.616890 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://3f6da396d28cc36ca41f3c66177dc4d0cb189de8276b44964aa3cabea76bd43f" gracePeriod=600
Jan 22 06:50:49 crc kubenswrapper[4814]: I0122 06:50:49.863172 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:49 crc kubenswrapper[4814]: I0122 06:50:49.863314 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:49 crc kubenswrapper[4814]: I0122 06:50:49.934520 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.106919 4814 generic.go:334] "Generic (PLEG): container finished" podID="c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" containerID="9370c2f5eb9fb90e56ce8dfc3b4ef685be64651243a04e857cc9d387515ebbfe" exitCode=0
Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.106981 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5cf7d7889-27mtt" event={"ID":"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a","Type":"ContainerDied","Data":"9370c2f5eb9fb90e56ce8dfc3b4ef685be64651243a04e857cc9d387515ebbfe"}
Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.116033 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="3f6da396d28cc36ca41f3c66177dc4d0cb189de8276b44964aa3cabea76bd43f" exitCode=0
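
gracePeriod=600 in the kill entry above means the runtime sends SIGTERM and escalates to SIGKILL only if the container outlives the grace period; here the container exited promptly (exitCode=0 in the following entry). A stand-alone Go sketch of that general pattern using os/exec; this illustrates the mechanism, it is not kubelet code:

package main

import (
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace mirrors "Killing container with a grace period ...":
// ask politely with SIGTERM, then escalate to SIGKILL after the deadline.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) error {
	if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		return err // exited within the grace period, as in the log
	case <-time.After(grace):
		return cmd.Process.Kill() // hard stop, like the runtime's SIGKILL
	}
}

func main() {
	cmd := exec.Command("sleep", "1")
	_ = cmd.Start()
	_ = killWithGrace(cmd, 5*time.Second)
}
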
containerID="3f6da396d28cc36ca41f3c66177dc4d0cb189de8276b44964aa3cabea76bd43f" exitCode=0 Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.116977 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"3f6da396d28cc36ca41f3c66177dc4d0cb189de8276b44964aa3cabea76bd43f"} Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.117116 4814 scope.go:117] "RemoveContainer" containerID="6c3b801e939a8982d4bdc8adc0dd42b643a8dedab7d3a2a07910d51cd50cdd68" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.199610 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2vrgl" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.279806 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2vrgl"] Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.389647 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5cf7d7889-27mtt" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.562775 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-httpd-config\") pod \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.563053 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-public-tls-certs\") pod \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.563158 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vd8rt\" (UniqueName: \"kubernetes.io/projected/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-kube-api-access-vd8rt\") pod \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.563314 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-internal-tls-certs\") pod \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.563528 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-combined-ca-bundle\") pod \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.563747 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-ovndb-tls-certs\") pod \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.563852 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-config\") pod 
\"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\" (UID: \"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a\") " Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.575276 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-kube-api-access-vd8rt" (OuterVolumeSpecName: "kube-api-access-vd8rt") pod "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" (UID: "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a"). InnerVolumeSpecName "kube-api-access-vd8rt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.578131 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" (UID: "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.623665 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-config" (OuterVolumeSpecName: "config") pod "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" (UID: "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.627429 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" (UID: "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.653887 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" (UID: "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.663489 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" (UID: "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.666046 4814 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.666090 4814 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.666105 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vd8rt\" (UniqueName: \"kubernetes.io/projected/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-kube-api-access-vd8rt\") on node \"crc\" DevicePath \"\"" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.666118 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.666132 4814 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.666144 4814 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-config\") on node \"crc\" DevicePath \"\"" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.671382 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" (UID: "c3a7ae87-2590-44b6-8d8e-e8f55a705f9a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 06:50:50 crc kubenswrapper[4814]: I0122 06:50:50.767866 4814 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 22 06:50:51 crc kubenswrapper[4814]: I0122 06:50:51.125796 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5cf7d7889-27mtt" event={"ID":"c3a7ae87-2590-44b6-8d8e-e8f55a705f9a","Type":"ContainerDied","Data":"4504c8e8fae62d0aa1ce406223ed6337600ebb65e15ce93a23c888d83c826826"} Jan 22 06:50:51 crc kubenswrapper[4814]: I0122 06:50:51.126099 4814 scope.go:117] "RemoveContainer" containerID="b5ab4f248565a32892ccfb1552a875f4e3e004a7648cf25d1f798f477759e22b" Jan 22 06:50:51 crc kubenswrapper[4814]: I0122 06:50:51.125830 4814 util.go:48] "No ready sandbox for pod can be found. 
Jan 22 06:50:51 crc kubenswrapper[4814]: I0122 06:50:51.130385 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a"}
Jan 22 06:50:51 crc kubenswrapper[4814]: I0122 06:50:51.150946 4814 scope.go:117] "RemoveContainer" containerID="9370c2f5eb9fb90e56ce8dfc3b4ef685be64651243a04e857cc9d387515ebbfe"
Jan 22 06:50:51 crc kubenswrapper[4814]: I0122 06:50:51.182504 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5cf7d7889-27mtt"]
Jan 22 06:50:51 crc kubenswrapper[4814]: I0122 06:50:51.194054 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5cf7d7889-27mtt"]
Jan 22 06:50:52 crc kubenswrapper[4814]: I0122 06:50:52.138974 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2vrgl" podUID="5233071f-1c58-43e7-b415-62edb5f81f49" containerName="registry-server" containerID="cri-o://dac5939647ef0e7b151903c9eeeaad61a6ccb3620d91f8208a232903a4353e90" gracePeriod=2
Jan 22 06:50:52 crc kubenswrapper[4814]: I0122 06:50:52.353400 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" path="/var/lib/kubelet/pods/c3a7ae87-2590-44b6-8d8e-e8f55a705f9a/volumes"
Jan 22 06:50:52 crc kubenswrapper[4814]: I0122 06:50:52.957669 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.014256 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5233071f-1c58-43e7-b415-62edb5f81f49-utilities\") pod \"5233071f-1c58-43e7-b415-62edb5f81f49\" (UID: \"5233071f-1c58-43e7-b415-62edb5f81f49\") "
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.014522 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ppb6\" (UniqueName: \"kubernetes.io/projected/5233071f-1c58-43e7-b415-62edb5f81f49-kube-api-access-7ppb6\") pod \"5233071f-1c58-43e7-b415-62edb5f81f49\" (UID: \"5233071f-1c58-43e7-b415-62edb5f81f49\") "
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.015292 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5233071f-1c58-43e7-b415-62edb5f81f49-utilities" (OuterVolumeSpecName: "utilities") pod "5233071f-1c58-43e7-b415-62edb5f81f49" (UID: "5233071f-1c58-43e7-b415-62edb5f81f49"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.024839 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5233071f-1c58-43e7-b415-62edb5f81f49-kube-api-access-7ppb6" (OuterVolumeSpecName: "kube-api-access-7ppb6") pod "5233071f-1c58-43e7-b415-62edb5f81f49" (UID: "5233071f-1c58-43e7-b415-62edb5f81f49"). InnerVolumeSpecName "kube-api-access-7ppb6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.115952 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5233071f-1c58-43e7-b415-62edb5f81f49-catalog-content\") pod \"5233071f-1c58-43e7-b415-62edb5f81f49\" (UID: \"5233071f-1c58-43e7-b415-62edb5f81f49\") "
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.116703 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5233071f-1c58-43e7-b415-62edb5f81f49-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.116723 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ppb6\" (UniqueName: \"kubernetes.io/projected/5233071f-1c58-43e7-b415-62edb5f81f49-kube-api-access-7ppb6\") on node \"crc\" DevicePath \"\""
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.148109 4814 generic.go:334] "Generic (PLEG): container finished" podID="5233071f-1c58-43e7-b415-62edb5f81f49" containerID="dac5939647ef0e7b151903c9eeeaad61a6ccb3620d91f8208a232903a4353e90" exitCode=0
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.148152 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vrgl" event={"ID":"5233071f-1c58-43e7-b415-62edb5f81f49","Type":"ContainerDied","Data":"dac5939647ef0e7b151903c9eeeaad61a6ccb3620d91f8208a232903a4353e90"}
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.148178 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vrgl" event={"ID":"5233071f-1c58-43e7-b415-62edb5f81f49","Type":"ContainerDied","Data":"12fabfad1e92838db32db264918fb6cd1003416b7d0865fc269984a697408376"}
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.148177 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2vrgl"
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.148194 4814 scope.go:117] "RemoveContainer" containerID="dac5939647ef0e7b151903c9eeeaad61a6ccb3620d91f8208a232903a4353e90"
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.173262 4814 scope.go:117] "RemoveContainer" containerID="0c708dc3b21bcd735915d2541e08ec18a9fd78d28d8c62876fe12d7886a0cfe1"
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.176838 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5233071f-1c58-43e7-b415-62edb5f81f49-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5233071f-1c58-43e7-b415-62edb5f81f49" (UID: "5233071f-1c58-43e7-b415-62edb5f81f49"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.197265 4814 scope.go:117] "RemoveContainer" containerID="19e7b54eb4a644e5924afbc8c49c561759b8406e1cb373bc087b967f95261441"
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.218167 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5233071f-1c58-43e7-b415-62edb5f81f49-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.261811 4814 scope.go:117] "RemoveContainer" containerID="dac5939647ef0e7b151903c9eeeaad61a6ccb3620d91f8208a232903a4353e90"
Jan 22 06:50:53 crc kubenswrapper[4814]: E0122 06:50:53.262278 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dac5939647ef0e7b151903c9eeeaad61a6ccb3620d91f8208a232903a4353e90\": container with ID starting with dac5939647ef0e7b151903c9eeeaad61a6ccb3620d91f8208a232903a4353e90 not found: ID does not exist" containerID="dac5939647ef0e7b151903c9eeeaad61a6ccb3620d91f8208a232903a4353e90"
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.262327 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dac5939647ef0e7b151903c9eeeaad61a6ccb3620d91f8208a232903a4353e90"} err="failed to get container status \"dac5939647ef0e7b151903c9eeeaad61a6ccb3620d91f8208a232903a4353e90\": rpc error: code = NotFound desc = could not find container \"dac5939647ef0e7b151903c9eeeaad61a6ccb3620d91f8208a232903a4353e90\": container with ID starting with dac5939647ef0e7b151903c9eeeaad61a6ccb3620d91f8208a232903a4353e90 not found: ID does not exist"
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.262360 4814 scope.go:117] "RemoveContainer" containerID="0c708dc3b21bcd735915d2541e08ec18a9fd78d28d8c62876fe12d7886a0cfe1"
Jan 22 06:50:53 crc kubenswrapper[4814]: E0122 06:50:53.262897 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c708dc3b21bcd735915d2541e08ec18a9fd78d28d8c62876fe12d7886a0cfe1\": container with ID starting with 0c708dc3b21bcd735915d2541e08ec18a9fd78d28d8c62876fe12d7886a0cfe1 not found: ID does not exist" containerID="0c708dc3b21bcd735915d2541e08ec18a9fd78d28d8c62876fe12d7886a0cfe1"
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.262932 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c708dc3b21bcd735915d2541e08ec18a9fd78d28d8c62876fe12d7886a0cfe1"} err="failed to get container status \"0c708dc3b21bcd735915d2541e08ec18a9fd78d28d8c62876fe12d7886a0cfe1\": rpc error: code = NotFound desc = could not find container \"0c708dc3b21bcd735915d2541e08ec18a9fd78d28d8c62876fe12d7886a0cfe1\": container with ID starting with 0c708dc3b21bcd735915d2541e08ec18a9fd78d28d8c62876fe12d7886a0cfe1 not found: ID does not exist"
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.262955 4814 scope.go:117] "RemoveContainer" containerID="19e7b54eb4a644e5924afbc8c49c561759b8406e1cb373bc087b967f95261441"
Jan 22 06:50:53 crc kubenswrapper[4814]: E0122 06:50:53.263220 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19e7b54eb4a644e5924afbc8c49c561759b8406e1cb373bc087b967f95261441\": container with ID starting with 19e7b54eb4a644e5924afbc8c49c561759b8406e1cb373bc087b967f95261441 not found: ID does not exist" containerID="19e7b54eb4a644e5924afbc8c49c561759b8406e1cb373bc087b967f95261441"
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.263249 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19e7b54eb4a644e5924afbc8c49c561759b8406e1cb373bc087b967f95261441"} err="failed to get container status \"19e7b54eb4a644e5924afbc8c49c561759b8406e1cb373bc087b967f95261441\": rpc error: code = NotFound desc = could not find container \"19e7b54eb4a644e5924afbc8c49c561759b8406e1cb373bc087b967f95261441\": container with ID starting with 19e7b54eb4a644e5924afbc8c49c561759b8406e1cb373bc087b967f95261441 not found: ID does not exist"
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.481576 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2vrgl"]
Jan 22 06:50:53 crc kubenswrapper[4814]: I0122 06:50:53.492392 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2vrgl"]
Jan 22 06:50:54 crc kubenswrapper[4814]: I0122 06:50:54.361727 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5233071f-1c58-43e7-b415-62edb5f81f49" path="/var/lib/kubelet/pods/5233071f-1c58-43e7-b415-62edb5f81f49/volumes"
Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.593537 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wwwq4"]
Jan 22 06:52:42 crc kubenswrapper[4814]: E0122 06:52:42.594478 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5233071f-1c58-43e7-b415-62edb5f81f49" containerName="registry-server"
Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.594494 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="5233071f-1c58-43e7-b415-62edb5f81f49" containerName="registry-server"
Jan 22 06:52:42 crc kubenswrapper[4814]: E0122 06:52:42.594515 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5233071f-1c58-43e7-b415-62edb5f81f49" containerName="extract-content"
Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.594524 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="5233071f-1c58-43e7-b415-62edb5f81f49" containerName="extract-content"
Jan 22 06:52:42 crc kubenswrapper[4814]: E0122 06:52:42.594549 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" containerName="neutron-httpd"
Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.594558 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" containerName="neutron-httpd"
Jan 22 06:52:42 crc kubenswrapper[4814]: E0122 06:52:42.594573 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5233071f-1c58-43e7-b415-62edb5f81f49" containerName="extract-utilities"
Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.594582 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="5233071f-1c58-43e7-b415-62edb5f81f49" containerName="extract-utilities"
Jan 22 06:52:42 crc kubenswrapper[4814]: E0122 06:52:42.594597 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" containerName="neutron-api"
Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.594605 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" containerName="neutron-api"
Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.594861 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" containerName="neutron-api"
podUID="c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" containerName="neutron-api" Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.594888 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3a7ae87-2590-44b6-8d8e-e8f55a705f9a" containerName="neutron-httpd" Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.594915 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="5233071f-1c58-43e7-b415-62edb5f81f49" containerName="registry-server" Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.596561 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.653569 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wwwq4"] Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.712162 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc286b1b-e3bd-469a-8443-42f0cd574f22-catalog-content\") pod \"community-operators-wwwq4\" (UID: \"fc286b1b-e3bd-469a-8443-42f0cd574f22\") " pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.712303 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc286b1b-e3bd-469a-8443-42f0cd574f22-utilities\") pod \"community-operators-wwwq4\" (UID: \"fc286b1b-e3bd-469a-8443-42f0cd574f22\") " pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.712377 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjgld\" (UniqueName: \"kubernetes.io/projected/fc286b1b-e3bd-469a-8443-42f0cd574f22-kube-api-access-rjgld\") pod \"community-operators-wwwq4\" (UID: \"fc286b1b-e3bd-469a-8443-42f0cd574f22\") " pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.813654 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc286b1b-e3bd-469a-8443-42f0cd574f22-utilities\") pod \"community-operators-wwwq4\" (UID: \"fc286b1b-e3bd-469a-8443-42f0cd574f22\") " pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.814066 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjgld\" (UniqueName: \"kubernetes.io/projected/fc286b1b-e3bd-469a-8443-42f0cd574f22-kube-api-access-rjgld\") pod \"community-operators-wwwq4\" (UID: \"fc286b1b-e3bd-469a-8443-42f0cd574f22\") " pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.814167 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc286b1b-e3bd-469a-8443-42f0cd574f22-utilities\") pod \"community-operators-wwwq4\" (UID: \"fc286b1b-e3bd-469a-8443-42f0cd574f22\") " pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.814344 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc286b1b-e3bd-469a-8443-42f0cd574f22-catalog-content\") pod 
\"community-operators-wwwq4\" (UID: \"fc286b1b-e3bd-469a-8443-42f0cd574f22\") " pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.814749 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc286b1b-e3bd-469a-8443-42f0cd574f22-catalog-content\") pod \"community-operators-wwwq4\" (UID: \"fc286b1b-e3bd-469a-8443-42f0cd574f22\") " pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.842387 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjgld\" (UniqueName: \"kubernetes.io/projected/fc286b1b-e3bd-469a-8443-42f0cd574f22-kube-api-access-rjgld\") pod \"community-operators-wwwq4\" (UID: \"fc286b1b-e3bd-469a-8443-42f0cd574f22\") " pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:52:42 crc kubenswrapper[4814]: I0122 06:52:42.926359 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:52:43 crc kubenswrapper[4814]: I0122 06:52:43.420780 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wwwq4"] Jan 22 06:52:44 crc kubenswrapper[4814]: I0122 06:52:44.268274 4814 generic.go:334] "Generic (PLEG): container finished" podID="fc286b1b-e3bd-469a-8443-42f0cd574f22" containerID="b3fce4514a6c28fe732227e537e7e1f56673f8489687b081856dbd2601ea7caf" exitCode=0 Jan 22 06:52:44 crc kubenswrapper[4814]: I0122 06:52:44.268427 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wwwq4" event={"ID":"fc286b1b-e3bd-469a-8443-42f0cd574f22","Type":"ContainerDied","Data":"b3fce4514a6c28fe732227e537e7e1f56673f8489687b081856dbd2601ea7caf"} Jan 22 06:52:44 crc kubenswrapper[4814]: I0122 06:52:44.268590 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wwwq4" event={"ID":"fc286b1b-e3bd-469a-8443-42f0cd574f22","Type":"ContainerStarted","Data":"15fdadce0444e1bcfb8e8893313e915f7ab4c43abb0af7df6103747c8f99fbdc"} Jan 22 06:52:44 crc kubenswrapper[4814]: I0122 06:52:44.271397 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 06:52:51 crc kubenswrapper[4814]: I0122 06:52:51.338003 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wwwq4" event={"ID":"fc286b1b-e3bd-469a-8443-42f0cd574f22","Type":"ContainerStarted","Data":"3af2202cd9814f8427778e73d15fd657cc9d20e690b12392a3d52367a06ecb6f"} Jan 22 06:52:52 crc kubenswrapper[4814]: I0122 06:52:52.349231 4814 generic.go:334] "Generic (PLEG): container finished" podID="fc286b1b-e3bd-469a-8443-42f0cd574f22" containerID="3af2202cd9814f8427778e73d15fd657cc9d20e690b12392a3d52367a06ecb6f" exitCode=0 Jan 22 06:52:52 crc kubenswrapper[4814]: I0122 06:52:52.354916 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wwwq4" event={"ID":"fc286b1b-e3bd-469a-8443-42f0cd574f22","Type":"ContainerDied","Data":"3af2202cd9814f8427778e73d15fd657cc9d20e690b12392a3d52367a06ecb6f"} Jan 22 06:52:53 crc kubenswrapper[4814]: I0122 06:52:53.387234 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wwwq4" 
event={"ID":"fc286b1b-e3bd-469a-8443-42f0cd574f22","Type":"ContainerStarted","Data":"4d7c723241dbe6bd476276210de513836e019c26f3d65ef2e90dbabd29fb5083"} Jan 22 06:52:53 crc kubenswrapper[4814]: I0122 06:52:53.415276 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wwwq4" podStartSLOduration=2.925192396 podStartE2EDuration="11.415256981s" podCreationTimestamp="2026-01-22 06:52:42 +0000 UTC" firstStartedPulling="2026-01-22 06:52:44.271166577 +0000 UTC m=+5650.354654792" lastFinishedPulling="2026-01-22 06:52:52.761231162 +0000 UTC m=+5658.844719377" observedRunningTime="2026-01-22 06:52:53.409190825 +0000 UTC m=+5659.492679040" watchObservedRunningTime="2026-01-22 06:52:53.415256981 +0000 UTC m=+5659.498745186" Jan 22 06:53:02 crc kubenswrapper[4814]: I0122 06:53:02.927208 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:53:02 crc kubenswrapper[4814]: I0122 06:53:02.927702 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:53:02 crc kubenswrapper[4814]: I0122 06:53:02.984041 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:53:03 crc kubenswrapper[4814]: I0122 06:53:03.518128 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wwwq4" Jan 22 06:53:03 crc kubenswrapper[4814]: I0122 06:53:03.603915 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wwwq4"] Jan 22 06:53:03 crc kubenswrapper[4814]: I0122 06:53:03.650720 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g5zdq"] Jan 22 06:53:03 crc kubenswrapper[4814]: I0122 06:53:03.651738 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-g5zdq" podUID="56872458-6815-4696-9016-c033137187f0" containerName="registry-server" containerID="cri-o://4a8697eb77831ab70909c2a60235c78da99301179f434224223233d4f4ad3654" gracePeriod=2 Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.313164 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g5zdq" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.474683 4814 generic.go:334] "Generic (PLEG): container finished" podID="56872458-6815-4696-9016-c033137187f0" containerID="4a8697eb77831ab70909c2a60235c78da99301179f434224223233d4f4ad3654" exitCode=0 Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.475027 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-g5zdq" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.475492 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5zdq" event={"ID":"56872458-6815-4696-9016-c033137187f0","Type":"ContainerDied","Data":"4a8697eb77831ab70909c2a60235c78da99301179f434224223233d4f4ad3654"} Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.475525 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g5zdq" event={"ID":"56872458-6815-4696-9016-c033137187f0","Type":"ContainerDied","Data":"1f594f8a2f7a10576d76cc7e4caadbfd1c87e77edb89623e743e2a1b51291851"} Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.475543 4814 scope.go:117] "RemoveContainer" containerID="4a8697eb77831ab70909c2a60235c78da99301179f434224223233d4f4ad3654" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.496734 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56872458-6815-4696-9016-c033137187f0-catalog-content\") pod \"56872458-6815-4696-9016-c033137187f0\" (UID: \"56872458-6815-4696-9016-c033137187f0\") " Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.496908 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56872458-6815-4696-9016-c033137187f0-utilities\") pod \"56872458-6815-4696-9016-c033137187f0\" (UID: \"56872458-6815-4696-9016-c033137187f0\") " Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.496946 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmx86\" (UniqueName: \"kubernetes.io/projected/56872458-6815-4696-9016-c033137187f0-kube-api-access-nmx86\") pod \"56872458-6815-4696-9016-c033137187f0\" (UID: \"56872458-6815-4696-9016-c033137187f0\") " Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.497473 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56872458-6815-4696-9016-c033137187f0-utilities" (OuterVolumeSpecName: "utilities") pod "56872458-6815-4696-9016-c033137187f0" (UID: "56872458-6815-4696-9016-c033137187f0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.497496 4814 scope.go:117] "RemoveContainer" containerID="64e7248a791c56806bff1bb1a0d89362d4dd98bd35739e72dab4cfec30de23fd" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.498514 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56872458-6815-4696-9016-c033137187f0-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.522021 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56872458-6815-4696-9016-c033137187f0-kube-api-access-nmx86" (OuterVolumeSpecName: "kube-api-access-nmx86") pod "56872458-6815-4696-9016-c033137187f0" (UID: "56872458-6815-4696-9016-c033137187f0"). InnerVolumeSpecName "kube-api-access-nmx86". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.588156 4814 scope.go:117] "RemoveContainer" containerID="709edcad206bc30fd728840720629778d1bde260101fe7c9e3807e1eef3c5f36" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.602670 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmx86\" (UniqueName: \"kubernetes.io/projected/56872458-6815-4696-9016-c033137187f0-kube-api-access-nmx86\") on node \"crc\" DevicePath \"\"" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.617402 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56872458-6815-4696-9016-c033137187f0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "56872458-6815-4696-9016-c033137187f0" (UID: "56872458-6815-4696-9016-c033137187f0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.634377 4814 scope.go:117] "RemoveContainer" containerID="4a8697eb77831ab70909c2a60235c78da99301179f434224223233d4f4ad3654" Jan 22 06:53:04 crc kubenswrapper[4814]: E0122 06:53:04.635973 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a8697eb77831ab70909c2a60235c78da99301179f434224223233d4f4ad3654\": container with ID starting with 4a8697eb77831ab70909c2a60235c78da99301179f434224223233d4f4ad3654 not found: ID does not exist" containerID="4a8697eb77831ab70909c2a60235c78da99301179f434224223233d4f4ad3654" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.636002 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a8697eb77831ab70909c2a60235c78da99301179f434224223233d4f4ad3654"} err="failed to get container status \"4a8697eb77831ab70909c2a60235c78da99301179f434224223233d4f4ad3654\": rpc error: code = NotFound desc = could not find container \"4a8697eb77831ab70909c2a60235c78da99301179f434224223233d4f4ad3654\": container with ID starting with 4a8697eb77831ab70909c2a60235c78da99301179f434224223233d4f4ad3654 not found: ID does not exist" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.636022 4814 scope.go:117] "RemoveContainer" containerID="64e7248a791c56806bff1bb1a0d89362d4dd98bd35739e72dab4cfec30de23fd" Jan 22 06:53:04 crc kubenswrapper[4814]: E0122 06:53:04.636929 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64e7248a791c56806bff1bb1a0d89362d4dd98bd35739e72dab4cfec30de23fd\": container with ID starting with 64e7248a791c56806bff1bb1a0d89362d4dd98bd35739e72dab4cfec30de23fd not found: ID does not exist" containerID="64e7248a791c56806bff1bb1a0d89362d4dd98bd35739e72dab4cfec30de23fd" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.636952 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64e7248a791c56806bff1bb1a0d89362d4dd98bd35739e72dab4cfec30de23fd"} err="failed to get container status \"64e7248a791c56806bff1bb1a0d89362d4dd98bd35739e72dab4cfec30de23fd\": rpc error: code = NotFound desc = could not find container \"64e7248a791c56806bff1bb1a0d89362d4dd98bd35739e72dab4cfec30de23fd\": container with ID starting with 64e7248a791c56806bff1bb1a0d89362d4dd98bd35739e72dab4cfec30de23fd not found: ID does not exist" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.636965 4814 scope.go:117] "RemoveContainer" 
containerID="709edcad206bc30fd728840720629778d1bde260101fe7c9e3807e1eef3c5f36" Jan 22 06:53:04 crc kubenswrapper[4814]: E0122 06:53:04.637393 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"709edcad206bc30fd728840720629778d1bde260101fe7c9e3807e1eef3c5f36\": container with ID starting with 709edcad206bc30fd728840720629778d1bde260101fe7c9e3807e1eef3c5f36 not found: ID does not exist" containerID="709edcad206bc30fd728840720629778d1bde260101fe7c9e3807e1eef3c5f36" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.637415 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"709edcad206bc30fd728840720629778d1bde260101fe7c9e3807e1eef3c5f36"} err="failed to get container status \"709edcad206bc30fd728840720629778d1bde260101fe7c9e3807e1eef3c5f36\": rpc error: code = NotFound desc = could not find container \"709edcad206bc30fd728840720629778d1bde260101fe7c9e3807e1eef3c5f36\": container with ID starting with 709edcad206bc30fd728840720629778d1bde260101fe7c9e3807e1eef3c5f36 not found: ID does not exist" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.704520 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56872458-6815-4696-9016-c033137187f0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.817757 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g5zdq"] Jan 22 06:53:04 crc kubenswrapper[4814]: I0122 06:53:04.830033 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-g5zdq"] Jan 22 06:53:06 crc kubenswrapper[4814]: I0122 06:53:06.375270 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56872458-6815-4696-9016-c033137187f0" path="/var/lib/kubelet/pods/56872458-6815-4696-9016-c033137187f0/volumes" Jan 22 06:53:19 crc kubenswrapper[4814]: I0122 06:53:19.613764 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:53:19 crc kubenswrapper[4814]: I0122 06:53:19.614333 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:53:49 crc kubenswrapper[4814]: I0122 06:53:49.614500 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:53:49 crc kubenswrapper[4814]: I0122 06:53:49.615149 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:54:19 crc kubenswrapper[4814]: I0122 
06:54:19.613798 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 06:54:19 crc kubenswrapper[4814]: I0122 06:54:19.614384 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 06:54:19 crc kubenswrapper[4814]: I0122 06:54:19.614444 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 06:54:19 crc kubenswrapper[4814]: I0122 06:54:19.615412 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 06:54:19 crc kubenswrapper[4814]: I0122 06:54:19.615485 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" gracePeriod=600 Jan 22 06:54:19 crc kubenswrapper[4814]: E0122 06:54:19.786187 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:54:20 crc kubenswrapper[4814]: I0122 06:54:20.261183 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" exitCode=0 Jan 22 06:54:20 crc kubenswrapper[4814]: I0122 06:54:20.261280 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a"} Jan 22 06:54:20 crc kubenswrapper[4814]: I0122 06:54:20.262032 4814 scope.go:117] "RemoveContainer" containerID="3f6da396d28cc36ca41f3c66177dc4d0cb189de8276b44964aa3cabea76bd43f" Jan 22 06:54:20 crc kubenswrapper[4814]: I0122 06:54:20.263272 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:54:20 crc kubenswrapper[4814]: E0122 06:54:20.263919 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:54:35 crc kubenswrapper[4814]: I0122 06:54:35.344485 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:54:35 crc kubenswrapper[4814]: E0122 06:54:35.345196 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:54:46 crc kubenswrapper[4814]: I0122 06:54:46.344619 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:54:46 crc kubenswrapper[4814]: E0122 06:54:46.345504 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:54:59 crc kubenswrapper[4814]: I0122 06:54:59.344979 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:54:59 crc kubenswrapper[4814]: E0122 06:54:59.346057 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:55:10 crc kubenswrapper[4814]: I0122 06:55:10.346591 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:55:10 crc kubenswrapper[4814]: E0122 06:55:10.347452 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:55:24 crc kubenswrapper[4814]: I0122 06:55:24.349761 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:55:24 crc kubenswrapper[4814]: E0122 06:55:24.350363 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:55:38 crc kubenswrapper[4814]: I0122 06:55:38.344360 4814 
scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:55:38 crc kubenswrapper[4814]: E0122 06:55:38.345020 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:55:50 crc kubenswrapper[4814]: I0122 06:55:50.344520 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:55:50 crc kubenswrapper[4814]: E0122 06:55:50.346056 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:56:02 crc kubenswrapper[4814]: I0122 06:56:02.348613 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:56:02 crc kubenswrapper[4814]: E0122 06:56:02.351377 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:56:04 crc kubenswrapper[4814]: I0122 06:56:04.841402 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-282rf"] Jan 22 06:56:04 crc kubenswrapper[4814]: E0122 06:56:04.848141 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56872458-6815-4696-9016-c033137187f0" containerName="extract-content" Jan 22 06:56:04 crc kubenswrapper[4814]: I0122 06:56:04.848168 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="56872458-6815-4696-9016-c033137187f0" containerName="extract-content" Jan 22 06:56:04 crc kubenswrapper[4814]: E0122 06:56:04.848221 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56872458-6815-4696-9016-c033137187f0" containerName="registry-server" Jan 22 06:56:04 crc kubenswrapper[4814]: I0122 06:56:04.848229 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="56872458-6815-4696-9016-c033137187f0" containerName="registry-server" Jan 22 06:56:04 crc kubenswrapper[4814]: E0122 06:56:04.848278 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56872458-6815-4696-9016-c033137187f0" containerName="extract-utilities" Jan 22 06:56:04 crc kubenswrapper[4814]: I0122 06:56:04.848288 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="56872458-6815-4696-9016-c033137187f0" containerName="extract-utilities" Jan 22 06:56:04 crc kubenswrapper[4814]: I0122 06:56:04.848558 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="56872458-6815-4696-9016-c033137187f0" containerName="registry-server" Jan 22 06:56:04 crc kubenswrapper[4814]: 
I0122 06:56:04.851171 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:04 crc kubenswrapper[4814]: I0122 06:56:04.882290 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-282rf"] Jan 22 06:56:04 crc kubenswrapper[4814]: I0122 06:56:04.951684 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-catalog-content\") pod \"redhat-operators-282rf\" (UID: \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\") " pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:04 crc kubenswrapper[4814]: I0122 06:56:04.951759 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbf69\" (UniqueName: \"kubernetes.io/projected/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-kube-api-access-qbf69\") pod \"redhat-operators-282rf\" (UID: \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\") " pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:04 crc kubenswrapper[4814]: I0122 06:56:04.951971 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-utilities\") pod \"redhat-operators-282rf\" (UID: \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\") " pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:05 crc kubenswrapper[4814]: I0122 06:56:05.053325 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-utilities\") pod \"redhat-operators-282rf\" (UID: \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\") " pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:05 crc kubenswrapper[4814]: I0122 06:56:05.053459 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-catalog-content\") pod \"redhat-operators-282rf\" (UID: \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\") " pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:05 crc kubenswrapper[4814]: I0122 06:56:05.053486 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbf69\" (UniqueName: \"kubernetes.io/projected/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-kube-api-access-qbf69\") pod \"redhat-operators-282rf\" (UID: \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\") " pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:05 crc kubenswrapper[4814]: I0122 06:56:05.054198 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-utilities\") pod \"redhat-operators-282rf\" (UID: \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\") " pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:05 crc kubenswrapper[4814]: I0122 06:56:05.054285 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-catalog-content\") pod \"redhat-operators-282rf\" (UID: \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\") " pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:05 crc kubenswrapper[4814]: I0122 06:56:05.076235 4814 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbf69\" (UniqueName: \"kubernetes.io/projected/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-kube-api-access-qbf69\") pod \"redhat-operators-282rf\" (UID: \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\") " pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:05 crc kubenswrapper[4814]: I0122 06:56:05.184763 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:05 crc kubenswrapper[4814]: I0122 06:56:05.766585 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-282rf"] Jan 22 06:56:06 crc kubenswrapper[4814]: I0122 06:56:06.211909 4814 generic.go:334] "Generic (PLEG): container finished" podID="e69d849d-5f5a-4dfd-935e-f7f001a3f19f" containerID="9788d5b43aacd835491935adc5791c5b020762c4ec34359a9eb92047a98869b0" exitCode=0 Jan 22 06:56:06 crc kubenswrapper[4814]: I0122 06:56:06.211987 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-282rf" event={"ID":"e69d849d-5f5a-4dfd-935e-f7f001a3f19f","Type":"ContainerDied","Data":"9788d5b43aacd835491935adc5791c5b020762c4ec34359a9eb92047a98869b0"} Jan 22 06:56:06 crc kubenswrapper[4814]: I0122 06:56:06.212046 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-282rf" event={"ID":"e69d849d-5f5a-4dfd-935e-f7f001a3f19f","Type":"ContainerStarted","Data":"ff595c897e6f97d9f2f5bfe9e74ae23a97717685f09b3945eea21f2a0fc12012"} Jan 22 06:56:08 crc kubenswrapper[4814]: I0122 06:56:08.231473 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-282rf" event={"ID":"e69d849d-5f5a-4dfd-935e-f7f001a3f19f","Type":"ContainerStarted","Data":"7660dede69c3f46f253b868b77e061a0a68b19b7f5c204fcb787804e9182dc0e"} Jan 22 06:56:13 crc kubenswrapper[4814]: I0122 06:56:13.284086 4814 generic.go:334] "Generic (PLEG): container finished" podID="e69d849d-5f5a-4dfd-935e-f7f001a3f19f" containerID="7660dede69c3f46f253b868b77e061a0a68b19b7f5c204fcb787804e9182dc0e" exitCode=0 Jan 22 06:56:13 crc kubenswrapper[4814]: I0122 06:56:13.284185 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-282rf" event={"ID":"e69d849d-5f5a-4dfd-935e-f7f001a3f19f","Type":"ContainerDied","Data":"7660dede69c3f46f253b868b77e061a0a68b19b7f5c204fcb787804e9182dc0e"} Jan 22 06:56:15 crc kubenswrapper[4814]: I0122 06:56:15.344128 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:56:15 crc kubenswrapper[4814]: E0122 06:56:15.344905 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:56:16 crc kubenswrapper[4814]: I0122 06:56:16.317389 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-282rf" event={"ID":"e69d849d-5f5a-4dfd-935e-f7f001a3f19f","Type":"ContainerStarted","Data":"db6f8d4d2bff85880aad17d8364fa06bdabe2d0a263e2faa4b1eaa399a64b771"} Jan 22 06:56:16 crc kubenswrapper[4814]: I0122 06:56:16.346893 4814 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-282rf" podStartSLOduration=2.867232972 podStartE2EDuration="12.346874369s" podCreationTimestamp="2026-01-22 06:56:04 +0000 UTC" firstStartedPulling="2026-01-22 06:56:06.213954976 +0000 UTC m=+5852.297443191" lastFinishedPulling="2026-01-22 06:56:15.693596363 +0000 UTC m=+5861.777084588" observedRunningTime="2026-01-22 06:56:16.334659095 +0000 UTC m=+5862.418147330" watchObservedRunningTime="2026-01-22 06:56:16.346874369 +0000 UTC m=+5862.430362584" Jan 22 06:56:25 crc kubenswrapper[4814]: I0122 06:56:25.185581 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:25 crc kubenswrapper[4814]: I0122 06:56:25.186252 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:26 crc kubenswrapper[4814]: I0122 06:56:26.249111 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-282rf" podUID="e69d849d-5f5a-4dfd-935e-f7f001a3f19f" containerName="registry-server" probeResult="failure" output=< Jan 22 06:56:26 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 06:56:26 crc kubenswrapper[4814]: > Jan 22 06:56:26 crc kubenswrapper[4814]: I0122 06:56:26.344575 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:56:26 crc kubenswrapper[4814]: E0122 06:56:26.345086 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:56:35 crc kubenswrapper[4814]: I0122 06:56:35.243458 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:35 crc kubenswrapper[4814]: I0122 06:56:35.308169 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:36 crc kubenswrapper[4814]: I0122 06:56:36.396501 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-282rf"] Jan 22 06:56:36 crc kubenswrapper[4814]: I0122 06:56:36.526681 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-282rf" podUID="e69d849d-5f5a-4dfd-935e-f7f001a3f19f" containerName="registry-server" containerID="cri-o://db6f8d4d2bff85880aad17d8364fa06bdabe2d0a263e2faa4b1eaa399a64b771" gracePeriod=2 Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.076742 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.095915 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbf69\" (UniqueName: \"kubernetes.io/projected/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-kube-api-access-qbf69\") pod \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\" (UID: \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\") " Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.095975 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-catalog-content\") pod \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\" (UID: \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\") " Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.096232 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-utilities\") pod \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\" (UID: \"e69d849d-5f5a-4dfd-935e-f7f001a3f19f\") " Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.097068 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-utilities" (OuterVolumeSpecName: "utilities") pod "e69d849d-5f5a-4dfd-935e-f7f001a3f19f" (UID: "e69d849d-5f5a-4dfd-935e-f7f001a3f19f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.106030 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-kube-api-access-qbf69" (OuterVolumeSpecName: "kube-api-access-qbf69") pod "e69d849d-5f5a-4dfd-935e-f7f001a3f19f" (UID: "e69d849d-5f5a-4dfd-935e-f7f001a3f19f"). InnerVolumeSpecName "kube-api-access-qbf69". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.198744 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.198781 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbf69\" (UniqueName: \"kubernetes.io/projected/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-kube-api-access-qbf69\") on node \"crc\" DevicePath \"\"" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.231288 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e69d849d-5f5a-4dfd-935e-f7f001a3f19f" (UID: "e69d849d-5f5a-4dfd-935e-f7f001a3f19f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.299446 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e69d849d-5f5a-4dfd-935e-f7f001a3f19f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.542170 4814 generic.go:334] "Generic (PLEG): container finished" podID="e69d849d-5f5a-4dfd-935e-f7f001a3f19f" containerID="db6f8d4d2bff85880aad17d8364fa06bdabe2d0a263e2faa4b1eaa399a64b771" exitCode=0 Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.542259 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-282rf" event={"ID":"e69d849d-5f5a-4dfd-935e-f7f001a3f19f","Type":"ContainerDied","Data":"db6f8d4d2bff85880aad17d8364fa06bdabe2d0a263e2faa4b1eaa399a64b771"} Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.542527 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-282rf" event={"ID":"e69d849d-5f5a-4dfd-935e-f7f001a3f19f","Type":"ContainerDied","Data":"ff595c897e6f97d9f2f5bfe9e74ae23a97717685f09b3945eea21f2a0fc12012"} Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.542546 4814 scope.go:117] "RemoveContainer" containerID="db6f8d4d2bff85880aad17d8364fa06bdabe2d0a263e2faa4b1eaa399a64b771" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.542307 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-282rf" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.580020 4814 scope.go:117] "RemoveContainer" containerID="7660dede69c3f46f253b868b77e061a0a68b19b7f5c204fcb787804e9182dc0e" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.582667 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-282rf"] Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.591819 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-282rf"] Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.613426 4814 scope.go:117] "RemoveContainer" containerID="9788d5b43aacd835491935adc5791c5b020762c4ec34359a9eb92047a98869b0" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.659538 4814 scope.go:117] "RemoveContainer" containerID="db6f8d4d2bff85880aad17d8364fa06bdabe2d0a263e2faa4b1eaa399a64b771" Jan 22 06:56:37 crc kubenswrapper[4814]: E0122 06:56:37.660239 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db6f8d4d2bff85880aad17d8364fa06bdabe2d0a263e2faa4b1eaa399a64b771\": container with ID starting with db6f8d4d2bff85880aad17d8364fa06bdabe2d0a263e2faa4b1eaa399a64b771 not found: ID does not exist" containerID="db6f8d4d2bff85880aad17d8364fa06bdabe2d0a263e2faa4b1eaa399a64b771" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.660280 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db6f8d4d2bff85880aad17d8364fa06bdabe2d0a263e2faa4b1eaa399a64b771"} err="failed to get container status \"db6f8d4d2bff85880aad17d8364fa06bdabe2d0a263e2faa4b1eaa399a64b771\": rpc error: code = NotFound desc = could not find container \"db6f8d4d2bff85880aad17d8364fa06bdabe2d0a263e2faa4b1eaa399a64b771\": container with ID starting with db6f8d4d2bff85880aad17d8364fa06bdabe2d0a263e2faa4b1eaa399a64b771 not found: ID does not exist" Jan 22 06:56:37 crc 
kubenswrapper[4814]: I0122 06:56:37.660305 4814 scope.go:117] "RemoveContainer" containerID="7660dede69c3f46f253b868b77e061a0a68b19b7f5c204fcb787804e9182dc0e" Jan 22 06:56:37 crc kubenswrapper[4814]: E0122 06:56:37.661239 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7660dede69c3f46f253b868b77e061a0a68b19b7f5c204fcb787804e9182dc0e\": container with ID starting with 7660dede69c3f46f253b868b77e061a0a68b19b7f5c204fcb787804e9182dc0e not found: ID does not exist" containerID="7660dede69c3f46f253b868b77e061a0a68b19b7f5c204fcb787804e9182dc0e" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.661280 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7660dede69c3f46f253b868b77e061a0a68b19b7f5c204fcb787804e9182dc0e"} err="failed to get container status \"7660dede69c3f46f253b868b77e061a0a68b19b7f5c204fcb787804e9182dc0e\": rpc error: code = NotFound desc = could not find container \"7660dede69c3f46f253b868b77e061a0a68b19b7f5c204fcb787804e9182dc0e\": container with ID starting with 7660dede69c3f46f253b868b77e061a0a68b19b7f5c204fcb787804e9182dc0e not found: ID does not exist" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.661304 4814 scope.go:117] "RemoveContainer" containerID="9788d5b43aacd835491935adc5791c5b020762c4ec34359a9eb92047a98869b0" Jan 22 06:56:37 crc kubenswrapper[4814]: E0122 06:56:37.661595 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9788d5b43aacd835491935adc5791c5b020762c4ec34359a9eb92047a98869b0\": container with ID starting with 9788d5b43aacd835491935adc5791c5b020762c4ec34359a9eb92047a98869b0 not found: ID does not exist" containerID="9788d5b43aacd835491935adc5791c5b020762c4ec34359a9eb92047a98869b0" Jan 22 06:56:37 crc kubenswrapper[4814]: I0122 06:56:37.661635 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9788d5b43aacd835491935adc5791c5b020762c4ec34359a9eb92047a98869b0"} err="failed to get container status \"9788d5b43aacd835491935adc5791c5b020762c4ec34359a9eb92047a98869b0\": rpc error: code = NotFound desc = could not find container \"9788d5b43aacd835491935adc5791c5b020762c4ec34359a9eb92047a98869b0\": container with ID starting with 9788d5b43aacd835491935adc5791c5b020762c4ec34359a9eb92047a98869b0 not found: ID does not exist" Jan 22 06:56:38 crc kubenswrapper[4814]: I0122 06:56:38.356445 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e69d849d-5f5a-4dfd-935e-f7f001a3f19f" path="/var/lib/kubelet/pods/e69d849d-5f5a-4dfd-935e-f7f001a3f19f/volumes" Jan 22 06:56:41 crc kubenswrapper[4814]: I0122 06:56:41.344440 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:56:41 crc kubenswrapper[4814]: E0122 06:56:41.347372 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:56:54 crc kubenswrapper[4814]: I0122 06:56:54.349940 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" 
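[editor's note] The repeated "RemoveContainer" / "CrashLoopBackOff: back-off 5m0s" pairs above and below show the kubelet re-attempting sync for machine-config-daemon-f57bg while the container's restart back-off is still in effect: the stock kubelet doubles the restart delay from 10s after each failed restart and caps it at 5m, which is why every retry in this stretch of the log reports the 5m0s ceiling. A minimal illustrative sketch of that capped doubling follows; it is an assumption-level model of the policy, not the kubelet's actual implementation:

    package main

    import (
        "fmt"
        "time"
    )

    // crashLoopBackOff models the kubelet's restart back-off for a
    // crash-looping container: start at 10s, double per failed restart,
    // cap at 5m. Illustrative only; not kubelet source.
    func crashLoopBackOff(restarts int) time.Duration {
        const (
            initialDelay = 10 * time.Second
            maxDelay     = 5 * time.Minute
        )
        d := initialDelay
        for i := 0; i < restarts; i++ {
            d *= 2
            if d > maxDelay {
                return maxDelay
            }
        }
        return d
    }

    func main() {
        for r := 0; r <= 6; r++ {
            fmt.Printf("restart %d -> back-off %s\n", r, crashLoopBackOff(r))
        }
    }

For restarts 0..6 this prints 10s, 20s, 40s, 1m20s, 2m40s, 5m0s, 5m0s — consistent with the "back-off 5m0s restarting failed container" messages logged here roughly every sync period once the ceiling is reached.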
Jan 22 06:56:54 crc kubenswrapper[4814]: E0122 06:56:54.350706 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:57:07 crc kubenswrapper[4814]: I0122 06:57:07.344330 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:57:07 crc kubenswrapper[4814]: E0122 06:57:07.345242 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:57:19 crc kubenswrapper[4814]: I0122 06:57:19.343734 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:57:19 crc kubenswrapper[4814]: E0122 06:57:19.344345 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:57:33 crc kubenswrapper[4814]: I0122 06:57:33.343601 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:57:33 crc kubenswrapper[4814]: E0122 06:57:33.344400 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.626774 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hmh7h"] Jan 22 06:57:41 crc kubenswrapper[4814]: E0122 06:57:41.627528 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e69d849d-5f5a-4dfd-935e-f7f001a3f19f" containerName="extract-content" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.627543 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="e69d849d-5f5a-4dfd-935e-f7f001a3f19f" containerName="extract-content" Jan 22 06:57:41 crc kubenswrapper[4814]: E0122 06:57:41.627577 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e69d849d-5f5a-4dfd-935e-f7f001a3f19f" containerName="extract-utilities" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.627584 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="e69d849d-5f5a-4dfd-935e-f7f001a3f19f" containerName="extract-utilities" Jan 22 06:57:41 crc kubenswrapper[4814]: E0122 06:57:41.627594 4814 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e69d849d-5f5a-4dfd-935e-f7f001a3f19f" containerName="registry-server" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.627600 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="e69d849d-5f5a-4dfd-935e-f7f001a3f19f" containerName="registry-server" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.627804 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="e69d849d-5f5a-4dfd-935e-f7f001a3f19f" containerName="registry-server" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.630796 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.650372 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hmh7h"] Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.777234 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76521085-a384-4552-9b22-f45e4ae06243-catalog-content\") pod \"redhat-marketplace-hmh7h\" (UID: \"76521085-a384-4552-9b22-f45e4ae06243\") " pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.777556 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhl85\" (UniqueName: \"kubernetes.io/projected/76521085-a384-4552-9b22-f45e4ae06243-kube-api-access-qhl85\") pod \"redhat-marketplace-hmh7h\" (UID: \"76521085-a384-4552-9b22-f45e4ae06243\") " pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.777835 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76521085-a384-4552-9b22-f45e4ae06243-utilities\") pod \"redhat-marketplace-hmh7h\" (UID: \"76521085-a384-4552-9b22-f45e4ae06243\") " pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.879595 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76521085-a384-4552-9b22-f45e4ae06243-utilities\") pod \"redhat-marketplace-hmh7h\" (UID: \"76521085-a384-4552-9b22-f45e4ae06243\") " pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.879770 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76521085-a384-4552-9b22-f45e4ae06243-catalog-content\") pod \"redhat-marketplace-hmh7h\" (UID: \"76521085-a384-4552-9b22-f45e4ae06243\") " pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.879844 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhl85\" (UniqueName: \"kubernetes.io/projected/76521085-a384-4552-9b22-f45e4ae06243-kube-api-access-qhl85\") pod \"redhat-marketplace-hmh7h\" (UID: \"76521085-a384-4552-9b22-f45e4ae06243\") " pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.880047 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/76521085-a384-4552-9b22-f45e4ae06243-utilities\") pod \"redhat-marketplace-hmh7h\" (UID: \"76521085-a384-4552-9b22-f45e4ae06243\") " pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.880370 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76521085-a384-4552-9b22-f45e4ae06243-catalog-content\") pod \"redhat-marketplace-hmh7h\" (UID: \"76521085-a384-4552-9b22-f45e4ae06243\") " pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.900274 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhl85\" (UniqueName: \"kubernetes.io/projected/76521085-a384-4552-9b22-f45e4ae06243-kube-api-access-qhl85\") pod \"redhat-marketplace-hmh7h\" (UID: \"76521085-a384-4552-9b22-f45e4ae06243\") " pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:41 crc kubenswrapper[4814]: I0122 06:57:41.953916 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:42 crc kubenswrapper[4814]: I0122 06:57:42.454145 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hmh7h"] Jan 22 06:57:43 crc kubenswrapper[4814]: I0122 06:57:43.142683 4814 generic.go:334] "Generic (PLEG): container finished" podID="76521085-a384-4552-9b22-f45e4ae06243" containerID="0d17372a7665349471d462d0f4830a06b2962440aa0554f5a1ae0acdf11a87c2" exitCode=0 Jan 22 06:57:43 crc kubenswrapper[4814]: I0122 06:57:43.142755 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hmh7h" event={"ID":"76521085-a384-4552-9b22-f45e4ae06243","Type":"ContainerDied","Data":"0d17372a7665349471d462d0f4830a06b2962440aa0554f5a1ae0acdf11a87c2"} Jan 22 06:57:43 crc kubenswrapper[4814]: I0122 06:57:43.143980 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hmh7h" event={"ID":"76521085-a384-4552-9b22-f45e4ae06243","Type":"ContainerStarted","Data":"2243e322635b0dc07ad1d5a2063b3fbdbd95c91d45401c311903ebd7f56f0fce"} Jan 22 06:57:44 crc kubenswrapper[4814]: I0122 06:57:44.159002 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hmh7h" event={"ID":"76521085-a384-4552-9b22-f45e4ae06243","Type":"ContainerStarted","Data":"dc06985cb54490120f24eca2a9fdf714a10d55b2de5a866a30ad8ec74b8ade39"} Jan 22 06:57:45 crc kubenswrapper[4814]: I0122 06:57:45.168559 4814 generic.go:334] "Generic (PLEG): container finished" podID="76521085-a384-4552-9b22-f45e4ae06243" containerID="dc06985cb54490120f24eca2a9fdf714a10d55b2de5a866a30ad8ec74b8ade39" exitCode=0 Jan 22 06:57:45 crc kubenswrapper[4814]: I0122 06:57:45.168609 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hmh7h" event={"ID":"76521085-a384-4552-9b22-f45e4ae06243","Type":"ContainerDied","Data":"dc06985cb54490120f24eca2a9fdf714a10d55b2de5a866a30ad8ec74b8ade39"} Jan 22 06:57:45 crc kubenswrapper[4814]: I0122 06:57:45.171589 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 06:57:46 crc kubenswrapper[4814]: I0122 06:57:46.177776 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hmh7h" 
event={"ID":"76521085-a384-4552-9b22-f45e4ae06243","Type":"ContainerStarted","Data":"cc89b674db62794e91d6da2cad79b6d1e0ff39b3e74575168b866a3efa9e17bf"} Jan 22 06:57:46 crc kubenswrapper[4814]: I0122 06:57:46.206504 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hmh7h" podStartSLOduration=2.796872629 podStartE2EDuration="5.206482484s" podCreationTimestamp="2026-01-22 06:57:41 +0000 UTC" firstStartedPulling="2026-01-22 06:57:43.144949033 +0000 UTC m=+5949.228437248" lastFinishedPulling="2026-01-22 06:57:45.554558888 +0000 UTC m=+5951.638047103" observedRunningTime="2026-01-22 06:57:46.197791193 +0000 UTC m=+5952.281279418" watchObservedRunningTime="2026-01-22 06:57:46.206482484 +0000 UTC m=+5952.289970699" Jan 22 06:57:46 crc kubenswrapper[4814]: I0122 06:57:46.344321 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:57:46 crc kubenswrapper[4814]: E0122 06:57:46.344563 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:57:51 crc kubenswrapper[4814]: I0122 06:57:51.954691 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:51 crc kubenswrapper[4814]: I0122 06:57:51.956241 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:52 crc kubenswrapper[4814]: I0122 06:57:52.004898 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:52 crc kubenswrapper[4814]: I0122 06:57:52.306485 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:55 crc kubenswrapper[4814]: I0122 06:57:55.644275 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hmh7h"] Jan 22 06:57:55 crc kubenswrapper[4814]: I0122 06:57:55.645132 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hmh7h" podUID="76521085-a384-4552-9b22-f45e4ae06243" containerName="registry-server" containerID="cri-o://cc89b674db62794e91d6da2cad79b6d1e0ff39b3e74575168b866a3efa9e17bf" gracePeriod=2 Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.146757 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.268753 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhl85\" (UniqueName: \"kubernetes.io/projected/76521085-a384-4552-9b22-f45e4ae06243-kube-api-access-qhl85\") pod \"76521085-a384-4552-9b22-f45e4ae06243\" (UID: \"76521085-a384-4552-9b22-f45e4ae06243\") " Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.269314 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76521085-a384-4552-9b22-f45e4ae06243-utilities\") pod \"76521085-a384-4552-9b22-f45e4ae06243\" (UID: \"76521085-a384-4552-9b22-f45e4ae06243\") " Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.269364 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76521085-a384-4552-9b22-f45e4ae06243-catalog-content\") pod \"76521085-a384-4552-9b22-f45e4ae06243\" (UID: \"76521085-a384-4552-9b22-f45e4ae06243\") " Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.270486 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76521085-a384-4552-9b22-f45e4ae06243-utilities" (OuterVolumeSpecName: "utilities") pod "76521085-a384-4552-9b22-f45e4ae06243" (UID: "76521085-a384-4552-9b22-f45e4ae06243"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.275150 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76521085-a384-4552-9b22-f45e4ae06243-kube-api-access-qhl85" (OuterVolumeSpecName: "kube-api-access-qhl85") pod "76521085-a384-4552-9b22-f45e4ae06243" (UID: "76521085-a384-4552-9b22-f45e4ae06243"). InnerVolumeSpecName "kube-api-access-qhl85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.286685 4814 generic.go:334] "Generic (PLEG): container finished" podID="76521085-a384-4552-9b22-f45e4ae06243" containerID="cc89b674db62794e91d6da2cad79b6d1e0ff39b3e74575168b866a3efa9e17bf" exitCode=0 Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.286728 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hmh7h" event={"ID":"76521085-a384-4552-9b22-f45e4ae06243","Type":"ContainerDied","Data":"cc89b674db62794e91d6da2cad79b6d1e0ff39b3e74575168b866a3efa9e17bf"} Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.286783 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hmh7h" event={"ID":"76521085-a384-4552-9b22-f45e4ae06243","Type":"ContainerDied","Data":"2243e322635b0dc07ad1d5a2063b3fbdbd95c91d45401c311903ebd7f56f0fce"} Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.286805 4814 scope.go:117] "RemoveContainer" containerID="cc89b674db62794e91d6da2cad79b6d1e0ff39b3e74575168b866a3efa9e17bf" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.286801 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hmh7h" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.299181 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76521085-a384-4552-9b22-f45e4ae06243-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "76521085-a384-4552-9b22-f45e4ae06243" (UID: "76521085-a384-4552-9b22-f45e4ae06243"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.349328 4814 scope.go:117] "RemoveContainer" containerID="dc06985cb54490120f24eca2a9fdf714a10d55b2de5a866a30ad8ec74b8ade39" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.370741 4814 scope.go:117] "RemoveContainer" containerID="0d17372a7665349471d462d0f4830a06b2962440aa0554f5a1ae0acdf11a87c2" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.371927 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76521085-a384-4552-9b22-f45e4ae06243-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.371986 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76521085-a384-4552-9b22-f45e4ae06243-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.372002 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhl85\" (UniqueName: \"kubernetes.io/projected/76521085-a384-4552-9b22-f45e4ae06243-kube-api-access-qhl85\") on node \"crc\" DevicePath \"\"" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.421014 4814 scope.go:117] "RemoveContainer" containerID="cc89b674db62794e91d6da2cad79b6d1e0ff39b3e74575168b866a3efa9e17bf" Jan 22 06:57:56 crc kubenswrapper[4814]: E0122 06:57:56.421397 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc89b674db62794e91d6da2cad79b6d1e0ff39b3e74575168b866a3efa9e17bf\": container with ID starting with cc89b674db62794e91d6da2cad79b6d1e0ff39b3e74575168b866a3efa9e17bf not found: ID does not exist" containerID="cc89b674db62794e91d6da2cad79b6d1e0ff39b3e74575168b866a3efa9e17bf" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.421427 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc89b674db62794e91d6da2cad79b6d1e0ff39b3e74575168b866a3efa9e17bf"} err="failed to get container status \"cc89b674db62794e91d6da2cad79b6d1e0ff39b3e74575168b866a3efa9e17bf\": rpc error: code = NotFound desc = could not find container \"cc89b674db62794e91d6da2cad79b6d1e0ff39b3e74575168b866a3efa9e17bf\": container with ID starting with cc89b674db62794e91d6da2cad79b6d1e0ff39b3e74575168b866a3efa9e17bf not found: ID does not exist" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.421449 4814 scope.go:117] "RemoveContainer" containerID="dc06985cb54490120f24eca2a9fdf714a10d55b2de5a866a30ad8ec74b8ade39" Jan 22 06:57:56 crc kubenswrapper[4814]: E0122 06:57:56.421714 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc06985cb54490120f24eca2a9fdf714a10d55b2de5a866a30ad8ec74b8ade39\": container with ID starting with dc06985cb54490120f24eca2a9fdf714a10d55b2de5a866a30ad8ec74b8ade39 not found: ID does not exist" 
containerID="dc06985cb54490120f24eca2a9fdf714a10d55b2de5a866a30ad8ec74b8ade39" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.421735 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc06985cb54490120f24eca2a9fdf714a10d55b2de5a866a30ad8ec74b8ade39"} err="failed to get container status \"dc06985cb54490120f24eca2a9fdf714a10d55b2de5a866a30ad8ec74b8ade39\": rpc error: code = NotFound desc = could not find container \"dc06985cb54490120f24eca2a9fdf714a10d55b2de5a866a30ad8ec74b8ade39\": container with ID starting with dc06985cb54490120f24eca2a9fdf714a10d55b2de5a866a30ad8ec74b8ade39 not found: ID does not exist" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.421748 4814 scope.go:117] "RemoveContainer" containerID="0d17372a7665349471d462d0f4830a06b2962440aa0554f5a1ae0acdf11a87c2" Jan 22 06:57:56 crc kubenswrapper[4814]: E0122 06:57:56.422074 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d17372a7665349471d462d0f4830a06b2962440aa0554f5a1ae0acdf11a87c2\": container with ID starting with 0d17372a7665349471d462d0f4830a06b2962440aa0554f5a1ae0acdf11a87c2 not found: ID does not exist" containerID="0d17372a7665349471d462d0f4830a06b2962440aa0554f5a1ae0acdf11a87c2" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.422101 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d17372a7665349471d462d0f4830a06b2962440aa0554f5a1ae0acdf11a87c2"} err="failed to get container status \"0d17372a7665349471d462d0f4830a06b2962440aa0554f5a1ae0acdf11a87c2\": rpc error: code = NotFound desc = could not find container \"0d17372a7665349471d462d0f4830a06b2962440aa0554f5a1ae0acdf11a87c2\": container with ID starting with 0d17372a7665349471d462d0f4830a06b2962440aa0554f5a1ae0acdf11a87c2 not found: ID does not exist" Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.620570 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hmh7h"] Jan 22 06:57:56 crc kubenswrapper[4814]: I0122 06:57:56.629611 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hmh7h"] Jan 22 06:57:58 crc kubenswrapper[4814]: I0122 06:57:58.359299 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76521085-a384-4552-9b22-f45e4ae06243" path="/var/lib/kubelet/pods/76521085-a384-4552-9b22-f45e4ae06243/volumes" Jan 22 06:57:59 crc kubenswrapper[4814]: I0122 06:57:59.343651 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:57:59 crc kubenswrapper[4814]: E0122 06:57:59.344543 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:58:14 crc kubenswrapper[4814]: I0122 06:58:14.350822 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:58:14 crc kubenswrapper[4814]: E0122 06:58:14.351502 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:58:29 crc kubenswrapper[4814]: I0122 06:58:29.344818 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:58:29 crc kubenswrapper[4814]: E0122 06:58:29.345612 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:58:42 crc kubenswrapper[4814]: I0122 06:58:42.344269 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:58:42 crc kubenswrapper[4814]: E0122 06:58:42.345007 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:58:57 crc kubenswrapper[4814]: I0122 06:58:57.344372 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:58:57 crc kubenswrapper[4814]: E0122 06:58:57.345417 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:59:09 crc kubenswrapper[4814]: I0122 06:59:09.344704 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:59:09 crc kubenswrapper[4814]: E0122 06:59:09.345458 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 06:59:23 crc kubenswrapper[4814]: I0122 06:59:23.344619 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 06:59:24 crc kubenswrapper[4814]: I0122 06:59:24.023313 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"2a89f926dad5dfa6c4e8d50be6391a2e186d120bcf658bb5de70dd8fda44ac64"} Jan 22 07:00:00 crc kubenswrapper[4814]: 
I0122 07:00:00.172383 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7"] Jan 22 07:00:00 crc kubenswrapper[4814]: E0122 07:00:00.173302 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76521085-a384-4552-9b22-f45e4ae06243" containerName="registry-server" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.173317 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="76521085-a384-4552-9b22-f45e4ae06243" containerName="registry-server" Jan 22 07:00:00 crc kubenswrapper[4814]: E0122 07:00:00.173345 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76521085-a384-4552-9b22-f45e4ae06243" containerName="extract-content" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.173351 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="76521085-a384-4552-9b22-f45e4ae06243" containerName="extract-content" Jan 22 07:00:00 crc kubenswrapper[4814]: E0122 07:00:00.173370 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76521085-a384-4552-9b22-f45e4ae06243" containerName="extract-utilities" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.173376 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="76521085-a384-4552-9b22-f45e4ae06243" containerName="extract-utilities" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.173567 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="76521085-a384-4552-9b22-f45e4ae06243" containerName="registry-server" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.174299 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.185933 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.185944 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.188551 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7"] Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.345160 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq6cp\" (UniqueName: \"kubernetes.io/projected/ca193c7f-5d11-417e-9466-fda0d587b330-kube-api-access-fq6cp\") pod \"collect-profiles-29484420-nn9d7\" (UID: \"ca193c7f-5d11-417e-9466-fda0d587b330\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.345240 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca193c7f-5d11-417e-9466-fda0d587b330-secret-volume\") pod \"collect-profiles-29484420-nn9d7\" (UID: \"ca193c7f-5d11-417e-9466-fda0d587b330\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.345508 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca193c7f-5d11-417e-9466-fda0d587b330-config-volume\") pod 
\"collect-profiles-29484420-nn9d7\" (UID: \"ca193c7f-5d11-417e-9466-fda0d587b330\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.446881 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca193c7f-5d11-417e-9466-fda0d587b330-config-volume\") pod \"collect-profiles-29484420-nn9d7\" (UID: \"ca193c7f-5d11-417e-9466-fda0d587b330\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.446975 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq6cp\" (UniqueName: \"kubernetes.io/projected/ca193c7f-5d11-417e-9466-fda0d587b330-kube-api-access-fq6cp\") pod \"collect-profiles-29484420-nn9d7\" (UID: \"ca193c7f-5d11-417e-9466-fda0d587b330\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.447032 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca193c7f-5d11-417e-9466-fda0d587b330-secret-volume\") pod \"collect-profiles-29484420-nn9d7\" (UID: \"ca193c7f-5d11-417e-9466-fda0d587b330\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.447753 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca193c7f-5d11-417e-9466-fda0d587b330-config-volume\") pod \"collect-profiles-29484420-nn9d7\" (UID: \"ca193c7f-5d11-417e-9466-fda0d587b330\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.462214 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca193c7f-5d11-417e-9466-fda0d587b330-secret-volume\") pod \"collect-profiles-29484420-nn9d7\" (UID: \"ca193c7f-5d11-417e-9466-fda0d587b330\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.481899 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq6cp\" (UniqueName: \"kubernetes.io/projected/ca193c7f-5d11-417e-9466-fda0d587b330-kube-api-access-fq6cp\") pod \"collect-profiles-29484420-nn9d7\" (UID: \"ca193c7f-5d11-417e-9466-fda0d587b330\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" Jan 22 07:00:00 crc kubenswrapper[4814]: I0122 07:00:00.502899 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" Jan 22 07:00:01 crc kubenswrapper[4814]: I0122 07:00:01.035193 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7"] Jan 22 07:00:01 crc kubenswrapper[4814]: I0122 07:00:01.765988 4814 generic.go:334] "Generic (PLEG): container finished" podID="ca193c7f-5d11-417e-9466-fda0d587b330" containerID="384c9837335ce0282cc923040152166e59f638639f47032d8ed7a050959bd024" exitCode=0 Jan 22 07:00:01 crc kubenswrapper[4814]: I0122 07:00:01.766048 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" event={"ID":"ca193c7f-5d11-417e-9466-fda0d587b330","Type":"ContainerDied","Data":"384c9837335ce0282cc923040152166e59f638639f47032d8ed7a050959bd024"} Jan 22 07:00:01 crc kubenswrapper[4814]: I0122 07:00:01.766299 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" event={"ID":"ca193c7f-5d11-417e-9466-fda0d587b330","Type":"ContainerStarted","Data":"f978e11c802e5d05edf40f954e62355eb9574cb5faa2d1ab8270685ff5af24c8"} Jan 22 07:00:03 crc kubenswrapper[4814]: I0122 07:00:03.176088 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" Jan 22 07:00:03 crc kubenswrapper[4814]: I0122 07:00:03.322911 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca193c7f-5d11-417e-9466-fda0d587b330-secret-volume\") pod \"ca193c7f-5d11-417e-9466-fda0d587b330\" (UID: \"ca193c7f-5d11-417e-9466-fda0d587b330\") " Jan 22 07:00:03 crc kubenswrapper[4814]: I0122 07:00:03.323118 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca193c7f-5d11-417e-9466-fda0d587b330-config-volume\") pod \"ca193c7f-5d11-417e-9466-fda0d587b330\" (UID: \"ca193c7f-5d11-417e-9466-fda0d587b330\") " Jan 22 07:00:03 crc kubenswrapper[4814]: I0122 07:00:03.323142 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fq6cp\" (UniqueName: \"kubernetes.io/projected/ca193c7f-5d11-417e-9466-fda0d587b330-kube-api-access-fq6cp\") pod \"ca193c7f-5d11-417e-9466-fda0d587b330\" (UID: \"ca193c7f-5d11-417e-9466-fda0d587b330\") " Jan 22 07:00:03 crc kubenswrapper[4814]: I0122 07:00:03.323735 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca193c7f-5d11-417e-9466-fda0d587b330-config-volume" (OuterVolumeSpecName: "config-volume") pod "ca193c7f-5d11-417e-9466-fda0d587b330" (UID: "ca193c7f-5d11-417e-9466-fda0d587b330"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:00:03 crc kubenswrapper[4814]: I0122 07:00:03.330342 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca193c7f-5d11-417e-9466-fda0d587b330-kube-api-access-fq6cp" (OuterVolumeSpecName: "kube-api-access-fq6cp") pod "ca193c7f-5d11-417e-9466-fda0d587b330" (UID: "ca193c7f-5d11-417e-9466-fda0d587b330"). InnerVolumeSpecName "kube-api-access-fq6cp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:00:03 crc kubenswrapper[4814]: I0122 07:00:03.333293 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca193c7f-5d11-417e-9466-fda0d587b330-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ca193c7f-5d11-417e-9466-fda0d587b330" (UID: "ca193c7f-5d11-417e-9466-fda0d587b330"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:00:03 crc kubenswrapper[4814]: I0122 07:00:03.425718 4814 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca193c7f-5d11-417e-9466-fda0d587b330-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 22 07:00:03 crc kubenswrapper[4814]: I0122 07:00:03.426094 4814 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca193c7f-5d11-417e-9466-fda0d587b330-config-volume\") on node \"crc\" DevicePath \"\"" Jan 22 07:00:03 crc kubenswrapper[4814]: I0122 07:00:03.426111 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fq6cp\" (UniqueName: \"kubernetes.io/projected/ca193c7f-5d11-417e-9466-fda0d587b330-kube-api-access-fq6cp\") on node \"crc\" DevicePath \"\"" Jan 22 07:00:03 crc kubenswrapper[4814]: I0122 07:00:03.786724 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" event={"ID":"ca193c7f-5d11-417e-9466-fda0d587b330","Type":"ContainerDied","Data":"f978e11c802e5d05edf40f954e62355eb9574cb5faa2d1ab8270685ff5af24c8"} Jan 22 07:00:03 crc kubenswrapper[4814]: I0122 07:00:03.786797 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29484420-nn9d7" Jan 22 07:00:03 crc kubenswrapper[4814]: I0122 07:00:03.786775 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f978e11c802e5d05edf40f954e62355eb9574cb5faa2d1ab8270685ff5af24c8" Jan 22 07:00:04 crc kubenswrapper[4814]: I0122 07:00:04.270585 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5"] Jan 22 07:00:04 crc kubenswrapper[4814]: I0122 07:00:04.278849 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29484375-5zqr5"] Jan 22 07:00:04 crc kubenswrapper[4814]: I0122 07:00:04.357031 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9808200d-e1b4-4644-80f3-af964fcdd471" path="/var/lib/kubelet/pods/9808200d-e1b4-4644-80f3-af964fcdd471/volumes" Jan 22 07:00:45 crc kubenswrapper[4814]: I0122 07:00:45.585748 4814 scope.go:117] "RemoveContainer" containerID="4cfcd740a79acb5231250b6f608754dbe65d82b441be5a90ff30536e25347e85" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.150702 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29484421-wmc9l"] Jan 22 07:01:00 crc kubenswrapper[4814]: E0122 07:01:00.151544 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca193c7f-5d11-417e-9466-fda0d587b330" containerName="collect-profiles" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.151557 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca193c7f-5d11-417e-9466-fda0d587b330" containerName="collect-profiles" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.151753 4814 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="ca193c7f-5d11-417e-9466-fda0d587b330" containerName="collect-profiles" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.152352 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.216112 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29484421-wmc9l"] Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.272545 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28l8t\" (UniqueName: \"kubernetes.io/projected/59517126-805f-4374-8275-c3475d33c32e-kube-api-access-28l8t\") pod \"keystone-cron-29484421-wmc9l\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.272617 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-config-data\") pod \"keystone-cron-29484421-wmc9l\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.272808 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-fernet-keys\") pod \"keystone-cron-29484421-wmc9l\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.272870 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-combined-ca-bundle\") pod \"keystone-cron-29484421-wmc9l\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.374182 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28l8t\" (UniqueName: \"kubernetes.io/projected/59517126-805f-4374-8275-c3475d33c32e-kube-api-access-28l8t\") pod \"keystone-cron-29484421-wmc9l\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.374344 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-config-data\") pod \"keystone-cron-29484421-wmc9l\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.374455 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-fernet-keys\") pod \"keystone-cron-29484421-wmc9l\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.374530 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-combined-ca-bundle\") pod 
\"keystone-cron-29484421-wmc9l\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.381166 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-combined-ca-bundle\") pod \"keystone-cron-29484421-wmc9l\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.382263 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-fernet-keys\") pod \"keystone-cron-29484421-wmc9l\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.384068 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-config-data\") pod \"keystone-cron-29484421-wmc9l\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.410931 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28l8t\" (UniqueName: \"kubernetes.io/projected/59517126-805f-4374-8275-c3475d33c32e-kube-api-access-28l8t\") pod \"keystone-cron-29484421-wmc9l\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.475058 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:00 crc kubenswrapper[4814]: I0122 07:01:00.970264 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29484421-wmc9l"] Jan 22 07:01:01 crc kubenswrapper[4814]: I0122 07:01:01.288793 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29484421-wmc9l" event={"ID":"59517126-805f-4374-8275-c3475d33c32e","Type":"ContainerStarted","Data":"ebe30c6eccddbf65617762a8765b59034d9317cbe37e152c3a9594e47ad2574b"} Jan 22 07:01:01 crc kubenswrapper[4814]: I0122 07:01:01.289106 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29484421-wmc9l" event={"ID":"59517126-805f-4374-8275-c3475d33c32e","Type":"ContainerStarted","Data":"ecb8fb6dcfe27c6603c25910aaac29f163f9a22157b50c53b5ba3e3b86c53ffc"} Jan 22 07:01:01 crc kubenswrapper[4814]: I0122 07:01:01.310434 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29484421-wmc9l" podStartSLOduration=1.310412093 podStartE2EDuration="1.310412093s" podCreationTimestamp="2026-01-22 07:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-22 07:01:01.306145758 +0000 UTC m=+6147.389633973" watchObservedRunningTime="2026-01-22 07:01:01.310412093 +0000 UTC m=+6147.393900318" Jan 22 07:01:04 crc kubenswrapper[4814]: I0122 07:01:04.318339 4814 generic.go:334] "Generic (PLEG): container finished" podID="59517126-805f-4374-8275-c3475d33c32e" containerID="ebe30c6eccddbf65617762a8765b59034d9317cbe37e152c3a9594e47ad2574b" exitCode=0 Jan 22 07:01:04 crc kubenswrapper[4814]: I0122 07:01:04.318442 4814 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/keystone-cron-29484421-wmc9l" event={"ID":"59517126-805f-4374-8275-c3475d33c32e","Type":"ContainerDied","Data":"ebe30c6eccddbf65617762a8765b59034d9317cbe37e152c3a9594e47ad2574b"} Jan 22 07:01:05 crc kubenswrapper[4814]: I0122 07:01:05.794437 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:05 crc kubenswrapper[4814]: I0122 07:01:05.898330 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-config-data\") pod \"59517126-805f-4374-8275-c3475d33c32e\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " Jan 22 07:01:05 crc kubenswrapper[4814]: I0122 07:01:05.898494 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28l8t\" (UniqueName: \"kubernetes.io/projected/59517126-805f-4374-8275-c3475d33c32e-kube-api-access-28l8t\") pod \"59517126-805f-4374-8275-c3475d33c32e\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " Jan 22 07:01:05 crc kubenswrapper[4814]: I0122 07:01:05.898555 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-combined-ca-bundle\") pod \"59517126-805f-4374-8275-c3475d33c32e\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " Jan 22 07:01:05 crc kubenswrapper[4814]: I0122 07:01:05.898588 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-fernet-keys\") pod \"59517126-805f-4374-8275-c3475d33c32e\" (UID: \"59517126-805f-4374-8275-c3475d33c32e\") " Jan 22 07:01:05 crc kubenswrapper[4814]: I0122 07:01:05.931855 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "59517126-805f-4374-8275-c3475d33c32e" (UID: "59517126-805f-4374-8275-c3475d33c32e"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:01:05 crc kubenswrapper[4814]: I0122 07:01:05.931964 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59517126-805f-4374-8275-c3475d33c32e-kube-api-access-28l8t" (OuterVolumeSpecName: "kube-api-access-28l8t") pod "59517126-805f-4374-8275-c3475d33c32e" (UID: "59517126-805f-4374-8275-c3475d33c32e"). InnerVolumeSpecName "kube-api-access-28l8t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:01:05 crc kubenswrapper[4814]: I0122 07:01:05.940906 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "59517126-805f-4374-8275-c3475d33c32e" (UID: "59517126-805f-4374-8275-c3475d33c32e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:01:05 crc kubenswrapper[4814]: I0122 07:01:05.975191 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-config-data" (OuterVolumeSpecName: "config-data") pod "59517126-805f-4374-8275-c3475d33c32e" (UID: "59517126-805f-4374-8275-c3475d33c32e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:01:06 crc kubenswrapper[4814]: I0122 07:01:06.004544 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:01:06 crc kubenswrapper[4814]: I0122 07:01:06.004577 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28l8t\" (UniqueName: \"kubernetes.io/projected/59517126-805f-4374-8275-c3475d33c32e-kube-api-access-28l8t\") on node \"crc\" DevicePath \"\"" Jan 22 07:01:06 crc kubenswrapper[4814]: I0122 07:01:06.004587 4814 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 22 07:01:06 crc kubenswrapper[4814]: I0122 07:01:06.004596 4814 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/59517126-805f-4374-8275-c3475d33c32e-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 22 07:01:06 crc kubenswrapper[4814]: I0122 07:01:06.336728 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29484421-wmc9l" event={"ID":"59517126-805f-4374-8275-c3475d33c32e","Type":"ContainerDied","Data":"ecb8fb6dcfe27c6603c25910aaac29f163f9a22157b50c53b5ba3e3b86c53ffc"} Jan 22 07:01:06 crc kubenswrapper[4814]: I0122 07:01:06.336783 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ecb8fb6dcfe27c6603c25910aaac29f163f9a22157b50c53b5ba3e3b86c53ffc" Jan 22 07:01:06 crc kubenswrapper[4814]: I0122 07:01:06.336852 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29484421-wmc9l" Jan 22 07:01:49 crc kubenswrapper[4814]: I0122 07:01:49.614404 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:01:49 crc kubenswrapper[4814]: I0122 07:01:49.615288 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:02:19 crc kubenswrapper[4814]: I0122 07:02:19.613751 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:02:19 crc kubenswrapper[4814]: I0122 07:02:19.614492 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:02:49 crc kubenswrapper[4814]: I0122 07:02:49.614435 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:02:49 crc kubenswrapper[4814]: I0122 07:02:49.615095 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:02:49 crc kubenswrapper[4814]: I0122 07:02:49.615150 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 07:02:49 crc kubenswrapper[4814]: I0122 07:02:49.615930 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2a89f926dad5dfa6c4e8d50be6391a2e186d120bcf658bb5de70dd8fda44ac64"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 07:02:49 crc kubenswrapper[4814]: I0122 07:02:49.616018 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://2a89f926dad5dfa6c4e8d50be6391a2e186d120bcf658bb5de70dd8fda44ac64" gracePeriod=600 Jan 22 07:02:50 crc kubenswrapper[4814]: I0122 07:02:50.335339 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="2a89f926dad5dfa6c4e8d50be6391a2e186d120bcf658bb5de70dd8fda44ac64" exitCode=0 Jan 22 07:02:50 crc kubenswrapper[4814]: I0122 07:02:50.335448 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"2a89f926dad5dfa6c4e8d50be6391a2e186d120bcf658bb5de70dd8fda44ac64"} Jan 22 07:02:50 crc kubenswrapper[4814]: I0122 07:02:50.335952 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerStarted","Data":"cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa"} Jan 22 07:02:50 crc kubenswrapper[4814]: I0122 07:02:50.335984 4814 scope.go:117] "RemoveContainer" containerID="19cbd3f895ca99add6a6f102269b4264c2d3ddbd07b0a65c48daee94d41af61a" Jan 22 07:03:23 crc kubenswrapper[4814]: I0122 07:03:23.698511 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rpstz"] Jan 22 07:03:23 crc kubenswrapper[4814]: E0122 07:03:23.699311 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59517126-805f-4374-8275-c3475d33c32e" containerName="keystone-cron" Jan 22 07:03:23 crc kubenswrapper[4814]: I0122 07:03:23.699323 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="59517126-805f-4374-8275-c3475d33c32e" containerName="keystone-cron" Jan 22 07:03:23 crc kubenswrapper[4814]: I0122 07:03:23.699507 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="59517126-805f-4374-8275-c3475d33c32e" containerName="keystone-cron" Jan 22 07:03:23 crc kubenswrapper[4814]: I0122 07:03:23.700785 4814 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-marketplace/community-operators-rpstz" Jan 22 07:03:23 crc kubenswrapper[4814]: I0122 07:03:23.711936 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rpstz"] Jan 22 07:03:23 crc kubenswrapper[4814]: I0122 07:03:23.901155 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df004b12-8659-4d61-80fa-23f1e435d86d-utilities\") pod \"community-operators-rpstz\" (UID: \"df004b12-8659-4d61-80fa-23f1e435d86d\") " pod="openshift-marketplace/community-operators-rpstz" Jan 22 07:03:23 crc kubenswrapper[4814]: I0122 07:03:23.901266 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5jxc\" (UniqueName: \"kubernetes.io/projected/df004b12-8659-4d61-80fa-23f1e435d86d-kube-api-access-l5jxc\") pod \"community-operators-rpstz\" (UID: \"df004b12-8659-4d61-80fa-23f1e435d86d\") " pod="openshift-marketplace/community-operators-rpstz" Jan 22 07:03:23 crc kubenswrapper[4814]: I0122 07:03:23.901360 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df004b12-8659-4d61-80fa-23f1e435d86d-catalog-content\") pod \"community-operators-rpstz\" (UID: \"df004b12-8659-4d61-80fa-23f1e435d86d\") " pod="openshift-marketplace/community-operators-rpstz" Jan 22 07:03:24 crc kubenswrapper[4814]: I0122 07:03:24.003178 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5jxc\" (UniqueName: \"kubernetes.io/projected/df004b12-8659-4d61-80fa-23f1e435d86d-kube-api-access-l5jxc\") pod \"community-operators-rpstz\" (UID: \"df004b12-8659-4d61-80fa-23f1e435d86d\") " pod="openshift-marketplace/community-operators-rpstz" Jan 22 07:03:24 crc kubenswrapper[4814]: I0122 07:03:24.003276 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df004b12-8659-4d61-80fa-23f1e435d86d-catalog-content\") pod \"community-operators-rpstz\" (UID: \"df004b12-8659-4d61-80fa-23f1e435d86d\") " pod="openshift-marketplace/community-operators-rpstz" Jan 22 07:03:24 crc kubenswrapper[4814]: I0122 07:03:24.003690 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df004b12-8659-4d61-80fa-23f1e435d86d-utilities\") pod \"community-operators-rpstz\" (UID: \"df004b12-8659-4d61-80fa-23f1e435d86d\") " pod="openshift-marketplace/community-operators-rpstz" Jan 22 07:03:24 crc kubenswrapper[4814]: I0122 07:03:24.004435 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df004b12-8659-4d61-80fa-23f1e435d86d-catalog-content\") pod \"community-operators-rpstz\" (UID: \"df004b12-8659-4d61-80fa-23f1e435d86d\") " pod="openshift-marketplace/community-operators-rpstz" Jan 22 07:03:24 crc kubenswrapper[4814]: I0122 07:03:24.004779 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df004b12-8659-4d61-80fa-23f1e435d86d-utilities\") pod \"community-operators-rpstz\" (UID: \"df004b12-8659-4d61-80fa-23f1e435d86d\") " pod="openshift-marketplace/community-operators-rpstz" Jan 22 07:03:24 crc kubenswrapper[4814]: I0122 07:03:24.023520 4814 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5jxc\" (UniqueName: \"kubernetes.io/projected/df004b12-8659-4d61-80fa-23f1e435d86d-kube-api-access-l5jxc\") pod \"community-operators-rpstz\" (UID: \"df004b12-8659-4d61-80fa-23f1e435d86d\") " pod="openshift-marketplace/community-operators-rpstz" Jan 22 07:03:24 crc kubenswrapper[4814]: I0122 07:03:24.041263 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rpstz" Jan 22 07:03:24 crc kubenswrapper[4814]: I0122 07:03:24.589470 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rpstz"] Jan 22 07:03:24 crc kubenswrapper[4814]: W0122 07:03:24.611643 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf004b12_8659_4d61_80fa_23f1e435d86d.slice/crio-49a009f9108099053183c2a34213acae3c63d39e610d7aca7f7efcbcd33cf5f5 WatchSource:0}: Error finding container 49a009f9108099053183c2a34213acae3c63d39e610d7aca7f7efcbcd33cf5f5: Status 404 returned error can't find the container with id 49a009f9108099053183c2a34213acae3c63d39e610d7aca7f7efcbcd33cf5f5 Jan 22 07:03:24 crc kubenswrapper[4814]: I0122 07:03:24.650839 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rpstz" event={"ID":"df004b12-8659-4d61-80fa-23f1e435d86d","Type":"ContainerStarted","Data":"49a009f9108099053183c2a34213acae3c63d39e610d7aca7f7efcbcd33cf5f5"} Jan 22 07:03:25 crc kubenswrapper[4814]: I0122 07:03:25.660889 4814 generic.go:334] "Generic (PLEG): container finished" podID="df004b12-8659-4d61-80fa-23f1e435d86d" containerID="4e4edfccd703664d909adb160bcc19d79f0f4869bfcd9db08a400e02e7ceb685" exitCode=0 Jan 22 07:03:25 crc kubenswrapper[4814]: I0122 07:03:25.661046 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rpstz" event={"ID":"df004b12-8659-4d61-80fa-23f1e435d86d","Type":"ContainerDied","Data":"4e4edfccd703664d909adb160bcc19d79f0f4869bfcd9db08a400e02e7ceb685"} Jan 22 07:03:25 crc kubenswrapper[4814]: I0122 07:03:25.663065 4814 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 22 07:03:26 crc kubenswrapper[4814]: I0122 07:03:26.669614 4814 generic.go:334] "Generic (PLEG): container finished" podID="a3b01ea9-b2ea-4634-9d07-cad4bf7e7533" containerID="d3e3c51083cd7432a94f444b017b7b1bdf1c73bc695f74cce6fc779e70a1e335" exitCode=1 Jan 22 07:03:26 crc kubenswrapper[4814]: I0122 07:03:26.669702 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" event={"ID":"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533","Type":"ContainerDied","Data":"d3e3c51083cd7432a94f444b017b7b1bdf1c73bc695f74cce6fc779e70a1e335"} Jan 22 07:03:26 crc kubenswrapper[4814]: I0122 07:03:26.671758 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rpstz" event={"ID":"df004b12-8659-4d61-80fa-23f1e435d86d","Type":"ContainerStarted","Data":"ed1823669be1c8e44cfd3031ce1688aac9f50cd8e544f1723307b77c2471b862"} Jan 22 07:03:27 crc kubenswrapper[4814]: I0122 07:03:27.681393 4814 generic.go:334] "Generic (PLEG): container finished" podID="df004b12-8659-4d61-80fa-23f1e435d86d" containerID="ed1823669be1c8e44cfd3031ce1688aac9f50cd8e544f1723307b77c2471b862" exitCode=0 Jan 22 07:03:27 crc kubenswrapper[4814]: I0122 07:03:27.681503 4814 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rpstz" event={"ID":"df004b12-8659-4d61-80fa-23f1e435d86d","Type":"ContainerDied","Data":"ed1823669be1c8e44cfd3031ce1688aac9f50cd8e544f1723307b77c2471b862"} Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.196840 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.381485 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlpr5\" (UniqueName: \"kubernetes.io/projected/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-kube-api-access-nlpr5\") pod \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.381595 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-test-operator-ephemeral-workdir\") pod \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.381655 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-config-data\") pod \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.381696 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-ca-certs\") pod \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.381718 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-openstack-config-secret\") pod \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.381834 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.381953 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-ssh-key\") pod \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.381980 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-test-operator-ephemeral-temporary\") pod \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.382001 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: 
\"kubernetes.io/configmap/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-openstack-config\") pod \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\" (UID: \"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533\") " Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.383468 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-config-data" (OuterVolumeSpecName: "config-data") pod "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533" (UID: "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.384002 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533" (UID: "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.388681 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533" (UID: "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.405435 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "test-operator-logs") pod "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533" (UID: "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.412888 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-kube-api-access-nlpr5" (OuterVolumeSpecName: "kube-api-access-nlpr5") pod "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533" (UID: "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533"). InnerVolumeSpecName "kube-api-access-nlpr5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.421228 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533" (UID: "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.423671 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533" (UID: "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.426999 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533" (UID: "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.444368 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533" (UID: "a3b01ea9-b2ea-4634-9d07-cad4bf7e7533"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.485267 4814 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.485296 4814 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-ssh-key\") on node \"crc\" DevicePath \"\"" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.485306 4814 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.485317 4814 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.485356 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlpr5\" (UniqueName: \"kubernetes.io/projected/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-kube-api-access-nlpr5\") on node \"crc\" DevicePath \"\"" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.485367 4814 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.485375 4814 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-config-data\") on node \"crc\" DevicePath \"\"" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.485382 4814 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-ca-certs\") on node \"crc\" DevicePath \"\"" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.485390 4814 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/a3b01ea9-b2ea-4634-9d07-cad4bf7e7533-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.515149 4814 operation_generator.go:917] UnmountDevice succeeded for 
volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.587353 4814 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.693893 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" event={"ID":"a3b01ea9-b2ea-4634-9d07-cad4bf7e7533","Type":"ContainerDied","Data":"db33fa3ccddd154da3b71b97f35b6014a1517065a157fa3151ffe8bb63ee4963"} Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.693935 4814 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db33fa3ccddd154da3b71b97f35b6014a1517065a157fa3151ffe8bb63ee4963" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.693975 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest-s01-single-thread-testing" Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.698621 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rpstz" event={"ID":"df004b12-8659-4d61-80fa-23f1e435d86d","Type":"ContainerStarted","Data":"3390ae98e43d1aa1a11c981817a11bb44a495e2a3dc05a20ef61795aaffed0da"} Jan 22 07:03:28 crc kubenswrapper[4814]: I0122 07:03:28.720551 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rpstz" podStartSLOduration=3.091645697 podStartE2EDuration="5.720532608s" podCreationTimestamp="2026-01-22 07:03:23 +0000 UTC" firstStartedPulling="2026-01-22 07:03:25.662839511 +0000 UTC m=+6291.746327726" lastFinishedPulling="2026-01-22 07:03:28.291726422 +0000 UTC m=+6294.375214637" observedRunningTime="2026-01-22 07:03:28.714144368 +0000 UTC m=+6294.797632583" watchObservedRunningTime="2026-01-22 07:03:28.720532608 +0000 UTC m=+6294.804020823" Jan 22 07:03:33 crc kubenswrapper[4814]: I0122 07:03:33.062143 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 22 07:03:33 crc kubenswrapper[4814]: E0122 07:03:33.063049 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3b01ea9-b2ea-4634-9d07-cad4bf7e7533" containerName="tempest-tests-tempest-tests-runner" Jan 22 07:03:33 crc kubenswrapper[4814]: I0122 07:03:33.063061 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3b01ea9-b2ea-4634-9d07-cad4bf7e7533" containerName="tempest-tests-tempest-tests-runner" Jan 22 07:03:33 crc kubenswrapper[4814]: I0122 07:03:33.063275 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3b01ea9-b2ea-4634-9d07-cad4bf7e7533" containerName="tempest-tests-tempest-tests-runner" Jan 22 07:03:33 crc kubenswrapper[4814]: I0122 07:03:33.063952 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 22 07:03:33 crc kubenswrapper[4814]: I0122 07:03:33.065876 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-tjss5" Jan 22 07:03:33 crc kubenswrapper[4814]: I0122 07:03:33.072327 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 22 07:03:33 crc kubenswrapper[4814]: I0122 07:03:33.174683 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vldmx\" (UniqueName: \"kubernetes.io/projected/06659c9c-dbe3-422a-990d-c68c93e555db-kube-api-access-vldmx\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"06659c9c-dbe3-422a-990d-c68c93e555db\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 22 07:03:33 crc kubenswrapper[4814]: I0122 07:03:33.174923 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"06659c9c-dbe3-422a-990d-c68c93e555db\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 22 07:03:33 crc kubenswrapper[4814]: I0122 07:03:33.276376 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"06659c9c-dbe3-422a-990d-c68c93e555db\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 22 07:03:33 crc kubenswrapper[4814]: I0122 07:03:33.276595 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vldmx\" (UniqueName: \"kubernetes.io/projected/06659c9c-dbe3-422a-990d-c68c93e555db-kube-api-access-vldmx\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"06659c9c-dbe3-422a-990d-c68c93e555db\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 22 07:03:33 crc kubenswrapper[4814]: I0122 07:03:33.279206 4814 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"06659c9c-dbe3-422a-990d-c68c93e555db\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 22 07:03:33 crc kubenswrapper[4814]: I0122 07:03:33.295702 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vldmx\" (UniqueName: \"kubernetes.io/projected/06659c9c-dbe3-422a-990d-c68c93e555db-kube-api-access-vldmx\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"06659c9c-dbe3-422a-990d-c68c93e555db\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 22 07:03:33 crc kubenswrapper[4814]: I0122 07:03:33.318859 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"06659c9c-dbe3-422a-990d-c68c93e555db\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 22 07:03:33 crc 
Jan 22 07:03:34 crc kubenswrapper[4814]: I0122 07:03:34.042817 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rpstz"
Jan 22 07:03:34 crc kubenswrapper[4814]: I0122 07:03:34.043057 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rpstz"
Jan 22 07:03:34 crc kubenswrapper[4814]: I0122 07:03:34.109442 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rpstz"
Jan 22 07:03:34 crc kubenswrapper[4814]: I0122 07:03:34.293288 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Jan 22 07:03:34 crc kubenswrapper[4814]: I0122 07:03:34.754294 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"06659c9c-dbe3-422a-990d-c68c93e555db","Type":"ContainerStarted","Data":"716eb3d90770d0435a8afb6131897b842967d7e8f42d32eba41ad1b1d16c7161"}
Jan 22 07:03:34 crc kubenswrapper[4814]: I0122 07:03:34.820224 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rpstz"
Jan 22 07:03:34 crc kubenswrapper[4814]: I0122 07:03:34.869294 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rpstz"]
Jan 22 07:03:35 crc kubenswrapper[4814]: I0122 07:03:35.766151 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"06659c9c-dbe3-422a-990d-c68c93e555db","Type":"ContainerStarted","Data":"5b5c47bcc0589b0ffcd5c7841be9b26700e20ba4a5546e9f8fb8d48d7d4ee867"}
Jan 22 07:03:35 crc kubenswrapper[4814]: I0122 07:03:35.787451 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=1.840132556 podStartE2EDuration="2.787422942s" podCreationTimestamp="2026-01-22 07:03:33 +0000 UTC" firstStartedPulling="2026-01-22 07:03:34.299075099 +0000 UTC m=+6300.382563314" lastFinishedPulling="2026-01-22 07:03:35.246365445 +0000 UTC m=+6301.329853700" observedRunningTime="2026-01-22 07:03:35.780743382 +0000 UTC m=+6301.864231607" watchObservedRunningTime="2026-01-22 07:03:35.787422942 +0000 UTC m=+6301.870911197"
Jan 22 07:03:36 crc kubenswrapper[4814]: I0122 07:03:36.776078 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rpstz" podUID="df004b12-8659-4d61-80fa-23f1e435d86d" containerName="registry-server" containerID="cri-o://3390ae98e43d1aa1a11c981817a11bb44a495e2a3dc05a20ef61795aaffed0da" gracePeriod=2
Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.277459 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rpstz"
Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.361494 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df004b12-8659-4d61-80fa-23f1e435d86d-utilities\") pod \"df004b12-8659-4d61-80fa-23f1e435d86d\" (UID: \"df004b12-8659-4d61-80fa-23f1e435d86d\") "
Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.361754 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df004b12-8659-4d61-80fa-23f1e435d86d-catalog-content\") pod \"df004b12-8659-4d61-80fa-23f1e435d86d\" (UID: \"df004b12-8659-4d61-80fa-23f1e435d86d\") "
Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.361858 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5jxc\" (UniqueName: \"kubernetes.io/projected/df004b12-8659-4d61-80fa-23f1e435d86d-kube-api-access-l5jxc\") pod \"df004b12-8659-4d61-80fa-23f1e435d86d\" (UID: \"df004b12-8659-4d61-80fa-23f1e435d86d\") "
Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.363719 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df004b12-8659-4d61-80fa-23f1e435d86d-utilities" (OuterVolumeSpecName: "utilities") pod "df004b12-8659-4d61-80fa-23f1e435d86d" (UID: "df004b12-8659-4d61-80fa-23f1e435d86d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.374835 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df004b12-8659-4d61-80fa-23f1e435d86d-kube-api-access-l5jxc" (OuterVolumeSpecName: "kube-api-access-l5jxc") pod "df004b12-8659-4d61-80fa-23f1e435d86d" (UID: "df004b12-8659-4d61-80fa-23f1e435d86d"). InnerVolumeSpecName "kube-api-access-l5jxc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.463971 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l5jxc\" (UniqueName: \"kubernetes.io/projected/df004b12-8659-4d61-80fa-23f1e435d86d-kube-api-access-l5jxc\") on node \"crc\" DevicePath \"\""
Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.464026 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df004b12-8659-4d61-80fa-23f1e435d86d-utilities\") on node \"crc\" DevicePath \"\""
Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.499388 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df004b12-8659-4d61-80fa-23f1e435d86d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "df004b12-8659-4d61-80fa-23f1e435d86d" (UID: "df004b12-8659-4d61-80fa-23f1e435d86d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.566144 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df004b12-8659-4d61-80fa-23f1e435d86d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.793453 4814 generic.go:334] "Generic (PLEG): container finished" podID="df004b12-8659-4d61-80fa-23f1e435d86d" containerID="3390ae98e43d1aa1a11c981817a11bb44a495e2a3dc05a20ef61795aaffed0da" exitCode=0 Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.793499 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rpstz" event={"ID":"df004b12-8659-4d61-80fa-23f1e435d86d","Type":"ContainerDied","Data":"3390ae98e43d1aa1a11c981817a11bb44a495e2a3dc05a20ef61795aaffed0da"} Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.793532 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rpstz" event={"ID":"df004b12-8659-4d61-80fa-23f1e435d86d","Type":"ContainerDied","Data":"49a009f9108099053183c2a34213acae3c63d39e610d7aca7f7efcbcd33cf5f5"} Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.793550 4814 scope.go:117] "RemoveContainer" containerID="3390ae98e43d1aa1a11c981817a11bb44a495e2a3dc05a20ef61795aaffed0da" Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.793611 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rpstz" Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.821896 4814 scope.go:117] "RemoveContainer" containerID="ed1823669be1c8e44cfd3031ce1688aac9f50cd8e544f1723307b77c2471b862" Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.843741 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rpstz"] Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.860855 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rpstz"] Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.861489 4814 scope.go:117] "RemoveContainer" containerID="4e4edfccd703664d909adb160bcc19d79f0f4869bfcd9db08a400e02e7ceb685" Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.905549 4814 scope.go:117] "RemoveContainer" containerID="3390ae98e43d1aa1a11c981817a11bb44a495e2a3dc05a20ef61795aaffed0da" Jan 22 07:03:37 crc kubenswrapper[4814]: E0122 07:03:37.906453 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3390ae98e43d1aa1a11c981817a11bb44a495e2a3dc05a20ef61795aaffed0da\": container with ID starting with 3390ae98e43d1aa1a11c981817a11bb44a495e2a3dc05a20ef61795aaffed0da not found: ID does not exist" containerID="3390ae98e43d1aa1a11c981817a11bb44a495e2a3dc05a20ef61795aaffed0da" Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.906521 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3390ae98e43d1aa1a11c981817a11bb44a495e2a3dc05a20ef61795aaffed0da"} err="failed to get container status \"3390ae98e43d1aa1a11c981817a11bb44a495e2a3dc05a20ef61795aaffed0da\": rpc error: code = NotFound desc = could not find container \"3390ae98e43d1aa1a11c981817a11bb44a495e2a3dc05a20ef61795aaffed0da\": container with ID starting with 3390ae98e43d1aa1a11c981817a11bb44a495e2a3dc05a20ef61795aaffed0da not found: ID does not exist" Jan 22 
07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.906553 4814 scope.go:117] "RemoveContainer" containerID="ed1823669be1c8e44cfd3031ce1688aac9f50cd8e544f1723307b77c2471b862" Jan 22 07:03:37 crc kubenswrapper[4814]: E0122 07:03:37.908719 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed1823669be1c8e44cfd3031ce1688aac9f50cd8e544f1723307b77c2471b862\": container with ID starting with ed1823669be1c8e44cfd3031ce1688aac9f50cd8e544f1723307b77c2471b862 not found: ID does not exist" containerID="ed1823669be1c8e44cfd3031ce1688aac9f50cd8e544f1723307b77c2471b862" Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.908751 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed1823669be1c8e44cfd3031ce1688aac9f50cd8e544f1723307b77c2471b862"} err="failed to get container status \"ed1823669be1c8e44cfd3031ce1688aac9f50cd8e544f1723307b77c2471b862\": rpc error: code = NotFound desc = could not find container \"ed1823669be1c8e44cfd3031ce1688aac9f50cd8e544f1723307b77c2471b862\": container with ID starting with ed1823669be1c8e44cfd3031ce1688aac9f50cd8e544f1723307b77c2471b862 not found: ID does not exist" Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.908774 4814 scope.go:117] "RemoveContainer" containerID="4e4edfccd703664d909adb160bcc19d79f0f4869bfcd9db08a400e02e7ceb685" Jan 22 07:03:37 crc kubenswrapper[4814]: E0122 07:03:37.909144 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e4edfccd703664d909adb160bcc19d79f0f4869bfcd9db08a400e02e7ceb685\": container with ID starting with 4e4edfccd703664d909adb160bcc19d79f0f4869bfcd9db08a400e02e7ceb685 not found: ID does not exist" containerID="4e4edfccd703664d909adb160bcc19d79f0f4869bfcd9db08a400e02e7ceb685" Jan 22 07:03:37 crc kubenswrapper[4814]: I0122 07:03:37.909190 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e4edfccd703664d909adb160bcc19d79f0f4869bfcd9db08a400e02e7ceb685"} err="failed to get container status \"4e4edfccd703664d909adb160bcc19d79f0f4869bfcd9db08a400e02e7ceb685\": rpc error: code = NotFound desc = could not find container \"4e4edfccd703664d909adb160bcc19d79f0f4869bfcd9db08a400e02e7ceb685\": container with ID starting with 4e4edfccd703664d909adb160bcc19d79f0f4869bfcd9db08a400e02e7ceb685 not found: ID does not exist" Jan 22 07:03:38 crc kubenswrapper[4814]: I0122 07:03:38.355835 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df004b12-8659-4d61-80fa-23f1e435d86d" path="/var/lib/kubelet/pods/df004b12-8659-4d61-80fa-23f1e435d86d/volumes" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.082991 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-gp24l/must-gather-k7wzr"] Jan 22 07:04:15 crc kubenswrapper[4814]: E0122 07:04:15.083745 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df004b12-8659-4d61-80fa-23f1e435d86d" containerName="extract-content" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.083758 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="df004b12-8659-4d61-80fa-23f1e435d86d" containerName="extract-content" Jan 22 07:04:15 crc kubenswrapper[4814]: E0122 07:04:15.083774 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df004b12-8659-4d61-80fa-23f1e435d86d" containerName="registry-server" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.083800 4814 
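
The RemoveStaleState pairs fire when a new pod is admitted while the CPU and memory managers still hold per-container state for a pod that no longer exists; here they enumerate all three containers of the deleted catalog pod, which incidentally documents the catalog pod layout: two init containers (extract-utilities, extract-content) followed by the long-running registry-server. A sketch counting stale-state cleanups per container from a saved log:

grep -F 'RemoveStaleState: removing container' kubelet.log \
  | grep -oE 'containerName="[^"]+"' | sort | uniq -c
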
state_mem.go:107] "Deleted CPUSet assignment" podUID="df004b12-8659-4d61-80fa-23f1e435d86d" containerName="registry-server" Jan 22 07:04:15 crc kubenswrapper[4814]: E0122 07:04:15.083818 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df004b12-8659-4d61-80fa-23f1e435d86d" containerName="extract-utilities" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.083826 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="df004b12-8659-4d61-80fa-23f1e435d86d" containerName="extract-utilities" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.084033 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="df004b12-8659-4d61-80fa-23f1e435d86d" containerName="registry-server" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.084946 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-gp24l/must-gather-k7wzr" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.086247 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/4764d95b-fc09-4889-a70c-9ae96b3caec2-must-gather-output\") pod \"must-gather-k7wzr\" (UID: \"4764d95b-fc09-4889-a70c-9ae96b3caec2\") " pod="openshift-must-gather-gp24l/must-gather-k7wzr" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.086364 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwsv6\" (UniqueName: \"kubernetes.io/projected/4764d95b-fc09-4889-a70c-9ae96b3caec2-kube-api-access-gwsv6\") pod \"must-gather-k7wzr\" (UID: \"4764d95b-fc09-4889-a70c-9ae96b3caec2\") " pod="openshift-must-gather-gp24l/must-gather-k7wzr" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.087665 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-gp24l"/"openshift-service-ca.crt" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.087896 4814 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-gp24l"/"default-dockercfg-6qjmh" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.088031 4814 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-gp24l"/"kube-root-ca.crt" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.187748 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwsv6\" (UniqueName: \"kubernetes.io/projected/4764d95b-fc09-4889-a70c-9ae96b3caec2-kube-api-access-gwsv6\") pod \"must-gather-k7wzr\" (UID: \"4764d95b-fc09-4889-a70c-9ae96b3caec2\") " pod="openshift-must-gather-gp24l/must-gather-k7wzr" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.187962 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/4764d95b-fc09-4889-a70c-9ae96b3caec2-must-gather-output\") pod \"must-gather-k7wzr\" (UID: \"4764d95b-fc09-4889-a70c-9ae96b3caec2\") " pod="openshift-must-gather-gp24l/must-gather-k7wzr" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.188364 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/4764d95b-fc09-4889-a70c-9ae96b3caec2-must-gather-output\") pod \"must-gather-k7wzr\" (UID: \"4764d95b-fc09-4889-a70c-9ae96b3caec2\") " pod="openshift-must-gather-gp24l/must-gather-k7wzr" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.200067 4814 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-gp24l/must-gather-k7wzr"] Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.255251 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwsv6\" (UniqueName: \"kubernetes.io/projected/4764d95b-fc09-4889-a70c-9ae96b3caec2-kube-api-access-gwsv6\") pod \"must-gather-k7wzr\" (UID: \"4764d95b-fc09-4889-a70c-9ae96b3caec2\") " pod="openshift-must-gather-gp24l/must-gather-k7wzr" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.402871 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-gp24l/must-gather-k7wzr" Jan 22 07:04:15 crc kubenswrapper[4814]: I0122 07:04:15.978782 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-gp24l/must-gather-k7wzr"] Jan 22 07:04:16 crc kubenswrapper[4814]: I0122 07:04:16.146303 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-gp24l/must-gather-k7wzr" event={"ID":"4764d95b-fc09-4889-a70c-9ae96b3caec2","Type":"ContainerStarted","Data":"7cfe0f2fcf0655c2b8ec286d72a3fccb00573b2f0a39c859f52558ba937e7fee"} Jan 22 07:04:26 crc kubenswrapper[4814]: I0122 07:04:26.692227 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j4n8d"] Jan 22 07:04:26 crc kubenswrapper[4814]: I0122 07:04:26.700431 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:26 crc kubenswrapper[4814]: I0122 07:04:26.725794 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j4n8d"] Jan 22 07:04:26 crc kubenswrapper[4814]: I0122 07:04:26.771308 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gcrb\" (UniqueName: \"kubernetes.io/projected/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-kube-api-access-9gcrb\") pod \"certified-operators-j4n8d\" (UID: \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\") " pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:26 crc kubenswrapper[4814]: I0122 07:04:26.771444 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-catalog-content\") pod \"certified-operators-j4n8d\" (UID: \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\") " pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:26 crc kubenswrapper[4814]: I0122 07:04:26.771552 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-utilities\") pod \"certified-operators-j4n8d\" (UID: \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\") " pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:26 crc kubenswrapper[4814]: I0122 07:04:26.874034 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gcrb\" (UniqueName: \"kubernetes.io/projected/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-kube-api-access-9gcrb\") pod \"certified-operators-j4n8d\" (UID: \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\") " pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:26 crc kubenswrapper[4814]: I0122 07:04:26.874157 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-catalog-content\") pod \"certified-operators-j4n8d\" (UID: \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\") " pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:26 crc kubenswrapper[4814]: I0122 07:04:26.874273 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-utilities\") pod \"certified-operators-j4n8d\" (UID: \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\") " pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:26 crc kubenswrapper[4814]: I0122 07:04:26.875006 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-utilities\") pod \"certified-operators-j4n8d\" (UID: \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\") " pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:26 crc kubenswrapper[4814]: I0122 07:04:26.875317 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-catalog-content\") pod \"certified-operators-j4n8d\" (UID: \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\") " pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:26 crc kubenswrapper[4814]: I0122 07:04:26.902794 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gcrb\" (UniqueName: \"kubernetes.io/projected/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-kube-api-access-9gcrb\") pod \"certified-operators-j4n8d\" (UID: \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\") " pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:27 crc kubenswrapper[4814]: I0122 07:04:27.032608 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:31 crc kubenswrapper[4814]: E0122 07:04:31.184717 4814 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-must-gather:latest" Jan 22 07:04:31 crc kubenswrapper[4814]: E0122 07:04:31.185889 4814 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 22 07:04:31 crc kubenswrapper[4814]: container &Container{Name:gather,Image:quay.io/openstack-k8s-operators/openstack-must-gather:latest,Command:[/bin/bash -c if command -v setsid >/dev/null 2>&1 && command -v ps >/dev/null 2>&1 && command -v pkill >/dev/null 2>&1; then Jan 22 07:04:31 crc kubenswrapper[4814]: HAVE_SESSION_TOOLS=true Jan 22 07:04:31 crc kubenswrapper[4814]: else Jan 22 07:04:31 crc kubenswrapper[4814]: HAVE_SESSION_TOOLS=false Jan 22 07:04:31 crc kubenswrapper[4814]: fi Jan 22 07:04:31 crc kubenswrapper[4814]: Jan 22 07:04:31 crc kubenswrapper[4814]: Jan 22 07:04:31 crc kubenswrapper[4814]: echo "[disk usage checker] Started" Jan 22 07:04:31 crc kubenswrapper[4814]: target_dir="/must-gather" Jan 22 07:04:31 crc kubenswrapper[4814]: usage_percentage_limit="80" Jan 22 07:04:31 crc kubenswrapper[4814]: while true; do Jan 22 07:04:31 crc kubenswrapper[4814]: usage_percentage=$(df -P "$target_dir" | awk 'NR==2 {print $5}' | sed 's/%//') Jan 22 07:04:31 crc kubenswrapper[4814]: echo "[disk usage checker] Volume usage percentage: current = ${usage_percentage} ; allowed = ${usage_percentage_limit}" Jan 22 07:04:31 crc kubenswrapper[4814]: if [ "$usage_percentage" -gt "$usage_percentage_limit" ]; then Jan 22 07:04:31 crc kubenswrapper[4814]: echo "[disk usage checker] Disk usage exceeds the volume percentage of ${usage_percentage_limit} for mounted directory, terminating..." 
Jan 22 07:04:31 crc kubenswrapper[4814]: if [ "$HAVE_SESSION_TOOLS" = "true" ]; then Jan 22 07:04:31 crc kubenswrapper[4814]: ps -o sess --no-headers | sort -u | while read sid; do Jan 22 07:04:31 crc kubenswrapper[4814]: [[ "$sid" -eq "${$}" ]] && continue Jan 22 07:04:31 crc kubenswrapper[4814]: pkill --signal SIGKILL --session "$sid" Jan 22 07:04:31 crc kubenswrapper[4814]: done Jan 22 07:04:31 crc kubenswrapper[4814]: else Jan 22 07:04:31 crc kubenswrapper[4814]: kill 0 Jan 22 07:04:31 crc kubenswrapper[4814]: fi Jan 22 07:04:31 crc kubenswrapper[4814]: exit 1 Jan 22 07:04:31 crc kubenswrapper[4814]: fi Jan 22 07:04:31 crc kubenswrapper[4814]: sleep 5 Jan 22 07:04:31 crc kubenswrapper[4814]: done & if [ "$HAVE_SESSION_TOOLS" = "true" ]; then Jan 22 07:04:31 crc kubenswrapper[4814]: setsid -w bash <<-MUSTGATHER_EOF Jan 22 07:04:31 crc kubenswrapper[4814]: ADDITIONAL_NAMESPACES=kuttl,openshift-storage,openshift-marketplace,openshift-operators,sushy-emulator,tobiko OPENSTACK_DATABASES=ALL SOS_EDPM=all SOS_DECOMPRESS=0 gather Jan 22 07:04:31 crc kubenswrapper[4814]: MUSTGATHER_EOF Jan 22 07:04:31 crc kubenswrapper[4814]: else Jan 22 07:04:31 crc kubenswrapper[4814]: ADDITIONAL_NAMESPACES=kuttl,openshift-storage,openshift-marketplace,openshift-operators,sushy-emulator,tobiko OPENSTACK_DATABASES=ALL SOS_EDPM=all SOS_DECOMPRESS=0 gather Jan 22 07:04:31 crc kubenswrapper[4814]: fi; sync && echo 'Caches written to disk'],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:must-gather-output,ReadOnly:false,MountPath:/must-gather,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gwsv6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod must-gather-k7wzr_openshift-must-gather-gp24l(4764d95b-fc09-4889-a70c-9ae96b3caec2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled Jan 22 07:04:31 crc kubenswrapper[4814]: > logger="UnhandledError" Jan 22 07:04:31 crc kubenswrapper[4814]: E0122 07:04:31.187920 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"gather\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"copy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-must-gather:latest\\\"\"]" pod="openshift-must-gather-gp24l/must-gather-k7wzr" podUID="4764d95b-fc09-4889-a70c-9ae96b3caec2" Jan 22 07:04:31 crc kubenswrapper[4814]: E0122 07:04:31.277431 4814 pod_workers.go:1301] "Error 
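
The container command embedded in the error dump above is a shell script fragmented across log prefixes. Reconstructed here for readability (indentation and comments are editorial; the content is as logged): a background watchdog polls df(1) on /must-gather every 5 seconds and, once usage exceeds 80%, SIGKILLs every other process session (falling back to kill 0 when setsid/ps/pkill are unavailable), while the gather entrypoint runs in its own session so the watchdog can kill it without killing itself.

#!/bin/bash
# Editorial reconstruction of the logged must-gather container command.
if command -v setsid >/dev/null 2>&1 && command -v ps >/dev/null 2>&1 && command -v pkill >/dev/null 2>&1; then
  HAVE_SESSION_TOOLS=true
else
  HAVE_SESSION_TOOLS=false
fi

echo "[disk usage checker] Started"
target_dir="/must-gather"
usage_percentage_limit="80"
while true; do
  # Column 5 of `df -P` is the usage percentage; strip the trailing '%'.
  usage_percentage=$(df -P "$target_dir" | awk 'NR==2 {print $5}' | sed 's/%//')
  echo "[disk usage checker] Volume usage percentage: current = ${usage_percentage} ; allowed = ${usage_percentage_limit}"
  if [ "$usage_percentage" -gt "$usage_percentage_limit" ]; then
    echo "[disk usage checker] Disk usage exceeds the volume percentage of ${usage_percentage_limit} for mounted directory, terminating..."
    if [ "$HAVE_SESSION_TOOLS" = "true" ]; then
      # Kill every session except the watchdog's own ("${$}" is this shell's PID).
      ps -o sess --no-headers | sort -u | while read sid; do
        [[ "$sid" -eq "${$}" ]] && continue
        pkill --signal SIGKILL --session "$sid"
      done
    else
      kill 0
    fi
    exit 1
  fi
  sleep 5
done &
if [ "$HAVE_SESSION_TOOLS" = "true" ]; then
  # <<- strips leading tabs; the gather entrypoint runs in a fresh session.
  setsid -w bash <<-MUSTGATHER_EOF
	ADDITIONAL_NAMESPACES=kuttl,openshift-storage,openshift-marketplace,openshift-operators,sushy-emulator,tobiko OPENSTACK_DATABASES=ALL SOS_EDPM=all SOS_DECOMPRESS=0 gather
	MUSTGATHER_EOF
else
  ADDITIONAL_NAMESPACES=kuttl,openshift-storage,openshift-marketplace,openshift-operators,sushy-emulator,tobiko OPENSTACK_DATABASES=ALL SOS_EDPM=all SOS_DECOMPRESS=0 gather
fi
sync && echo 'Caches written to disk'
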
syncing pod, skipping" err="[failed to \"StartContainer\" for \"gather\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-must-gather:latest\\\"\", failed to \"StartContainer\" for \"copy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-must-gather:latest\\\"\"]" pod="openshift-must-gather-gp24l/must-gather-k7wzr" podUID="4764d95b-fc09-4889-a70c-9ae96b3caec2" Jan 22 07:04:31 crc kubenswrapper[4814]: I0122 07:04:31.594025 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j4n8d"] Jan 22 07:04:31 crc kubenswrapper[4814]: W0122 07:04:31.597754 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf11a0b1a_66a5_4a63_85f2_0a324dfbb61e.slice/crio-4f6874b0d49b14a14e48b72eb0b7d99005f941cf6138c4c28eb6bb2ef1c13e84 WatchSource:0}: Error finding container 4f6874b0d49b14a14e48b72eb0b7d99005f941cf6138c4c28eb6bb2ef1c13e84: Status 404 returned error can't find the container with id 4f6874b0d49b14a14e48b72eb0b7d99005f941cf6138c4c28eb6bb2ef1c13e84 Jan 22 07:04:32 crc kubenswrapper[4814]: I0122 07:04:32.284245 4814 generic.go:334] "Generic (PLEG): container finished" podID="f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" containerID="6359681abc553cff05b3ec25fda9fa6cd46316aefe81c2bc1fa43ca8220c9b19" exitCode=0 Jan 22 07:04:32 crc kubenswrapper[4814]: I0122 07:04:32.284291 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4n8d" event={"ID":"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e","Type":"ContainerDied","Data":"6359681abc553cff05b3ec25fda9fa6cd46316aefe81c2bc1fa43ca8220c9b19"} Jan 22 07:04:32 crc kubenswrapper[4814]: I0122 07:04:32.284507 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4n8d" event={"ID":"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e","Type":"ContainerStarted","Data":"4f6874b0d49b14a14e48b72eb0b7d99005f941cf6138c4c28eb6bb2ef1c13e84"} Jan 22 07:04:34 crc kubenswrapper[4814]: I0122 07:04:34.308572 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4n8d" event={"ID":"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e","Type":"ContainerStarted","Data":"2e797627b376a06c0ba153b220ff365a5ddd25ee6068c7e8e4075dd6e0be95fd"} Jan 22 07:04:37 crc kubenswrapper[4814]: I0122 07:04:37.336224 4814 generic.go:334] "Generic (PLEG): container finished" podID="f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" containerID="2e797627b376a06c0ba153b220ff365a5ddd25ee6068c7e8e4075dd6e0be95fd" exitCode=0 Jan 22 07:04:37 crc kubenswrapper[4814]: I0122 07:04:37.336771 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4n8d" event={"ID":"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e","Type":"ContainerDied","Data":"2e797627b376a06c0ba153b220ff365a5ddd25ee6068c7e8e4075dd6e0be95fd"} Jan 22 07:04:39 crc kubenswrapper[4814]: I0122 07:04:39.354409 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4n8d" event={"ID":"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e","Type":"ContainerStarted","Data":"b65a5703808b1783d21101f70baaebf9d307f3807176fd47103476e8ad0b8d81"} Jan 22 07:04:39 crc kubenswrapper[4814]: I0122 07:04:39.374092 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j4n8d" podStartSLOduration=7.160653999 podStartE2EDuration="13.374072308s" 
podCreationTimestamp="2026-01-22 07:04:26 +0000 UTC" firstStartedPulling="2026-01-22 07:04:32.286968851 +0000 UTC m=+6358.370457066" lastFinishedPulling="2026-01-22 07:04:38.50038716 +0000 UTC m=+6364.583875375" observedRunningTime="2026-01-22 07:04:39.373664875 +0000 UTC m=+6365.457153110" watchObservedRunningTime="2026-01-22 07:04:39.374072308 +0000 UTC m=+6365.457560523" Jan 22 07:04:41 crc kubenswrapper[4814]: I0122 07:04:41.872429 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-gp24l/must-gather-k7wzr"] Jan 22 07:04:41 crc kubenswrapper[4814]: I0122 07:04:41.886995 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-gp24l/must-gather-k7wzr"] Jan 22 07:04:42 crc kubenswrapper[4814]: I0122 07:04:42.230080 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-gp24l/must-gather-k7wzr" Jan 22 07:04:42 crc kubenswrapper[4814]: I0122 07:04:42.241436 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/4764d95b-fc09-4889-a70c-9ae96b3caec2-must-gather-output\") pod \"4764d95b-fc09-4889-a70c-9ae96b3caec2\" (UID: \"4764d95b-fc09-4889-a70c-9ae96b3caec2\") " Jan 22 07:04:42 crc kubenswrapper[4814]: I0122 07:04:42.241822 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4764d95b-fc09-4889-a70c-9ae96b3caec2-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "4764d95b-fc09-4889-a70c-9ae96b3caec2" (UID: "4764d95b-fc09-4889-a70c-9ae96b3caec2"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:04:42 crc kubenswrapper[4814]: I0122 07:04:42.242015 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gwsv6\" (UniqueName: \"kubernetes.io/projected/4764d95b-fc09-4889-a70c-9ae96b3caec2-kube-api-access-gwsv6\") pod \"4764d95b-fc09-4889-a70c-9ae96b3caec2\" (UID: \"4764d95b-fc09-4889-a70c-9ae96b3caec2\") " Jan 22 07:04:42 crc kubenswrapper[4814]: I0122 07:04:42.242999 4814 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/4764d95b-fc09-4889-a70c-9ae96b3caec2-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 22 07:04:42 crc kubenswrapper[4814]: I0122 07:04:42.378340 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-gp24l/must-gather-k7wzr" Jan 22 07:04:42 crc kubenswrapper[4814]: I0122 07:04:42.791953 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4764d95b-fc09-4889-a70c-9ae96b3caec2-kube-api-access-gwsv6" (OuterVolumeSpecName: "kube-api-access-gwsv6") pod "4764d95b-fc09-4889-a70c-9ae96b3caec2" (UID: "4764d95b-fc09-4889-a70c-9ae96b3caec2"). InnerVolumeSpecName "kube-api-access-gwsv6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:04:42 crc kubenswrapper[4814]: I0122 07:04:42.853031 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gwsv6\" (UniqueName: \"kubernetes.io/projected/4764d95b-fc09-4889-a70c-9ae96b3caec2-kube-api-access-gwsv6\") on node \"crc\" DevicePath \"\"" Jan 22 07:04:44 crc kubenswrapper[4814]: I0122 07:04:44.357562 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4764d95b-fc09-4889-a70c-9ae96b3caec2" path="/var/lib/kubelet/pods/4764d95b-fc09-4889-a70c-9ae96b3caec2/volumes" Jan 22 07:04:47 crc kubenswrapper[4814]: I0122 07:04:47.033449 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:47 crc kubenswrapper[4814]: I0122 07:04:47.033802 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:47 crc kubenswrapper[4814]: I0122 07:04:47.085025 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:47 crc kubenswrapper[4814]: I0122 07:04:47.464232 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:47 crc kubenswrapper[4814]: I0122 07:04:47.534614 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j4n8d"] Jan 22 07:04:49 crc kubenswrapper[4814]: I0122 07:04:49.458032 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-j4n8d" podUID="f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" containerName="registry-server" containerID="cri-o://b65a5703808b1783d21101f70baaebf9d307f3807176fd47103476e8ad0b8d81" gracePeriod=2 Jan 22 07:04:49 crc kubenswrapper[4814]: I0122 07:04:49.616054 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:04:49 crc kubenswrapper[4814]: I0122 07:04:49.616366 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.028010 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.207596 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gcrb\" (UniqueName: \"kubernetes.io/projected/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-kube-api-access-9gcrb\") pod \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\" (UID: \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\") " Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.207774 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-utilities\") pod \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\" (UID: \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\") " Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.207949 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-catalog-content\") pod \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\" (UID: \"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e\") " Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.213771 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-utilities" (OuterVolumeSpecName: "utilities") pod "f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" (UID: "f11a0b1a-66a5-4a63-85f2-0a324dfbb61e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.215535 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-kube-api-access-9gcrb" (OuterVolumeSpecName: "kube-api-access-9gcrb") pod "f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" (UID: "f11a0b1a-66a5-4a63-85f2-0a324dfbb61e"). InnerVolumeSpecName "kube-api-access-9gcrb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.270474 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" (UID: "f11a0b1a-66a5-4a63-85f2-0a324dfbb61e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.309958 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.310007 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gcrb\" (UniqueName: \"kubernetes.io/projected/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-kube-api-access-9gcrb\") on node \"crc\" DevicePath \"\"" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.310018 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.469595 4814 generic.go:334] "Generic (PLEG): container finished" podID="f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" containerID="b65a5703808b1783d21101f70baaebf9d307f3807176fd47103476e8ad0b8d81" exitCode=0 Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.469654 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4n8d" event={"ID":"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e","Type":"ContainerDied","Data":"b65a5703808b1783d21101f70baaebf9d307f3807176fd47103476e8ad0b8d81"} Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.469682 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4n8d" event={"ID":"f11a0b1a-66a5-4a63-85f2-0a324dfbb61e","Type":"ContainerDied","Data":"4f6874b0d49b14a14e48b72eb0b7d99005f941cf6138c4c28eb6bb2ef1c13e84"} Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.469702 4814 scope.go:117] "RemoveContainer" containerID="b65a5703808b1783d21101f70baaebf9d307f3807176fd47103476e8ad0b8d81" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.469797 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j4n8d" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.505564 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j4n8d"] Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.535372 4814 scope.go:117] "RemoveContainer" containerID="2e797627b376a06c0ba153b220ff365a5ddd25ee6068c7e8e4075dd6e0be95fd" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.537728 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-j4n8d"] Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.563166 4814 scope.go:117] "RemoveContainer" containerID="6359681abc553cff05b3ec25fda9fa6cd46316aefe81c2bc1fa43ca8220c9b19" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.607988 4814 scope.go:117] "RemoveContainer" containerID="b65a5703808b1783d21101f70baaebf9d307f3807176fd47103476e8ad0b8d81" Jan 22 07:04:50 crc kubenswrapper[4814]: E0122 07:04:50.610472 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b65a5703808b1783d21101f70baaebf9d307f3807176fd47103476e8ad0b8d81\": container with ID starting with b65a5703808b1783d21101f70baaebf9d307f3807176fd47103476e8ad0b8d81 not found: ID does not exist" containerID="b65a5703808b1783d21101f70baaebf9d307f3807176fd47103476e8ad0b8d81" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.610521 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b65a5703808b1783d21101f70baaebf9d307f3807176fd47103476e8ad0b8d81"} err="failed to get container status \"b65a5703808b1783d21101f70baaebf9d307f3807176fd47103476e8ad0b8d81\": rpc error: code = NotFound desc = could not find container \"b65a5703808b1783d21101f70baaebf9d307f3807176fd47103476e8ad0b8d81\": container with ID starting with b65a5703808b1783d21101f70baaebf9d307f3807176fd47103476e8ad0b8d81 not found: ID does not exist" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.610566 4814 scope.go:117] "RemoveContainer" containerID="2e797627b376a06c0ba153b220ff365a5ddd25ee6068c7e8e4075dd6e0be95fd" Jan 22 07:04:50 crc kubenswrapper[4814]: E0122 07:04:50.611228 4814 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e797627b376a06c0ba153b220ff365a5ddd25ee6068c7e8e4075dd6e0be95fd\": container with ID starting with 2e797627b376a06c0ba153b220ff365a5ddd25ee6068c7e8e4075dd6e0be95fd not found: ID does not exist" containerID="2e797627b376a06c0ba153b220ff365a5ddd25ee6068c7e8e4075dd6e0be95fd" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.611263 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e797627b376a06c0ba153b220ff365a5ddd25ee6068c7e8e4075dd6e0be95fd"} err="failed to get container status \"2e797627b376a06c0ba153b220ff365a5ddd25ee6068c7e8e4075dd6e0be95fd\": rpc error: code = NotFound desc = could not find container \"2e797627b376a06c0ba153b220ff365a5ddd25ee6068c7e8e4075dd6e0be95fd\": container with ID starting with 2e797627b376a06c0ba153b220ff365a5ddd25ee6068c7e8e4075dd6e0be95fd not found: ID does not exist" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.611285 4814 scope.go:117] "RemoveContainer" containerID="6359681abc553cff05b3ec25fda9fa6cd46316aefe81c2bc1fa43ca8220c9b19" Jan 22 07:04:50 crc kubenswrapper[4814]: E0122 07:04:50.611489 4814 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6359681abc553cff05b3ec25fda9fa6cd46316aefe81c2bc1fa43ca8220c9b19\": container with ID starting with 6359681abc553cff05b3ec25fda9fa6cd46316aefe81c2bc1fa43ca8220c9b19 not found: ID does not exist" containerID="6359681abc553cff05b3ec25fda9fa6cd46316aefe81c2bc1fa43ca8220c9b19" Jan 22 07:04:50 crc kubenswrapper[4814]: I0122 07:04:50.611512 4814 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6359681abc553cff05b3ec25fda9fa6cd46316aefe81c2bc1fa43ca8220c9b19"} err="failed to get container status \"6359681abc553cff05b3ec25fda9fa6cd46316aefe81c2bc1fa43ca8220c9b19\": rpc error: code = NotFound desc = could not find container \"6359681abc553cff05b3ec25fda9fa6cd46316aefe81c2bc1fa43ca8220c9b19\": container with ID starting with 6359681abc553cff05b3ec25fda9fa6cd46316aefe81c2bc1fa43ca8220c9b19 not found: ID does not exist" Jan 22 07:04:52 crc kubenswrapper[4814]: I0122 07:04:52.360162 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" path="/var/lib/kubelet/pods/f11a0b1a-66a5-4a63-85f2-0a324dfbb61e/volumes" Jan 22 07:05:19 crc kubenswrapper[4814]: I0122 07:05:19.614401 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:05:19 crc kubenswrapper[4814]: I0122 07:05:19.615051 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:05:49 crc kubenswrapper[4814]: I0122 07:05:49.614058 4814 patch_prober.go:28] interesting pod/machine-config-daemon-f57bg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 22 07:05:49 crc kubenswrapper[4814]: I0122 07:05:49.614617 4814 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 22 07:05:49 crc kubenswrapper[4814]: I0122 07:05:49.614698 4814 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" Jan 22 07:05:49 crc kubenswrapper[4814]: I0122 07:05:49.615457 4814 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa"} pod="openshift-machine-config-operator/machine-config-daemon-f57bg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 22 07:05:49 crc kubenswrapper[4814]: I0122 07:05:49.615519 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" 
podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerName="machine-config-daemon" containerID="cri-o://cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa" gracePeriod=600 Jan 22 07:05:50 crc kubenswrapper[4814]: I0122 07:05:50.052476 4814 generic.go:334] "Generic (PLEG): container finished" podID="362cbfbe-caa3-40b7-906c-80c378b01e0c" containerID="cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa" exitCode=0 Jan 22 07:05:50 crc kubenswrapper[4814]: I0122 07:05:50.052531 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" event={"ID":"362cbfbe-caa3-40b7-906c-80c378b01e0c","Type":"ContainerDied","Data":"cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa"} Jan 22 07:05:50 crc kubenswrapper[4814]: I0122 07:05:50.052860 4814 scope.go:117] "RemoveContainer" containerID="2a89f926dad5dfa6c4e8d50be6391a2e186d120bcf658bb5de70dd8fda44ac64" Jan 22 07:05:50 crc kubenswrapper[4814]: E0122 07:05:50.407904 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 07:05:51 crc kubenswrapper[4814]: I0122 07:05:51.086301 4814 scope.go:117] "RemoveContainer" containerID="cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa" Jan 22 07:05:51 crc kubenswrapper[4814]: E0122 07:05:51.086861 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 07:06:03 crc kubenswrapper[4814]: I0122 07:06:03.344056 4814 scope.go:117] "RemoveContainer" containerID="cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa" Jan 22 07:06:03 crc kubenswrapper[4814]: E0122 07:06:03.344747 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 07:06:18 crc kubenswrapper[4814]: I0122 07:06:18.345214 4814 scope.go:117] "RemoveContainer" containerID="cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa" Jan 22 07:06:18 crc kubenswrapper[4814]: E0122 07:06:18.346549 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 07:06:31 crc kubenswrapper[4814]: I0122 07:06:31.344145 4814 scope.go:117] 
"RemoveContainer" containerID="cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa" Jan 22 07:06:31 crc kubenswrapper[4814]: E0122 07:06:31.345467 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 07:06:46 crc kubenswrapper[4814]: I0122 07:06:46.349466 4814 scope.go:117] "RemoveContainer" containerID="cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa" Jan 22 07:06:46 crc kubenswrapper[4814]: E0122 07:06:46.350603 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.259571 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xlf6r"] Jan 22 07:06:53 crc kubenswrapper[4814]: E0122 07:06:53.260474 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" containerName="registry-server" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.260490 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" containerName="registry-server" Jan 22 07:06:53 crc kubenswrapper[4814]: E0122 07:06:53.260509 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" containerName="extract-utilities" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.260518 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" containerName="extract-utilities" Jan 22 07:06:53 crc kubenswrapper[4814]: E0122 07:06:53.260534 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" containerName="extract-content" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.260545 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" containerName="extract-content" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.260777 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="f11a0b1a-66a5-4a63-85f2-0a324dfbb61e" containerName="registry-server" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.263069 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.274867 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9277783-76c6-4e8d-bb11-e8e89789c251-utilities\") pod \"redhat-operators-xlf6r\" (UID: \"a9277783-76c6-4e8d-bb11-e8e89789c251\") " pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.275123 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-954ml\" (UniqueName: \"kubernetes.io/projected/a9277783-76c6-4e8d-bb11-e8e89789c251-kube-api-access-954ml\") pod \"redhat-operators-xlf6r\" (UID: \"a9277783-76c6-4e8d-bb11-e8e89789c251\") " pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.275212 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9277783-76c6-4e8d-bb11-e8e89789c251-catalog-content\") pod \"redhat-operators-xlf6r\" (UID: \"a9277783-76c6-4e8d-bb11-e8e89789c251\") " pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.278113 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xlf6r"] Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.376855 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-954ml\" (UniqueName: \"kubernetes.io/projected/a9277783-76c6-4e8d-bb11-e8e89789c251-kube-api-access-954ml\") pod \"redhat-operators-xlf6r\" (UID: \"a9277783-76c6-4e8d-bb11-e8e89789c251\") " pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.376973 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9277783-76c6-4e8d-bb11-e8e89789c251-catalog-content\") pod \"redhat-operators-xlf6r\" (UID: \"a9277783-76c6-4e8d-bb11-e8e89789c251\") " pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.377158 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9277783-76c6-4e8d-bb11-e8e89789c251-utilities\") pod \"redhat-operators-xlf6r\" (UID: \"a9277783-76c6-4e8d-bb11-e8e89789c251\") " pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.378107 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9277783-76c6-4e8d-bb11-e8e89789c251-catalog-content\") pod \"redhat-operators-xlf6r\" (UID: \"a9277783-76c6-4e8d-bb11-e8e89789c251\") " pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.378224 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9277783-76c6-4e8d-bb11-e8e89789c251-utilities\") pod \"redhat-operators-xlf6r\" (UID: \"a9277783-76c6-4e8d-bb11-e8e89789c251\") " pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.396201 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-954ml\" (UniqueName: \"kubernetes.io/projected/a9277783-76c6-4e8d-bb11-e8e89789c251-kube-api-access-954ml\") pod \"redhat-operators-xlf6r\" (UID: \"a9277783-76c6-4e8d-bb11-e8e89789c251\") " pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:06:53 crc kubenswrapper[4814]: I0122 07:06:53.587558 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:06:54 crc kubenswrapper[4814]: I0122 07:06:54.257805 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xlf6r"] Jan 22 07:06:54 crc kubenswrapper[4814]: I0122 07:06:54.727344 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xlf6r" event={"ID":"a9277783-76c6-4e8d-bb11-e8e89789c251","Type":"ContainerStarted","Data":"6826f25f8ecd72c1d303a391fee786ddb4696a911b5a0150edeaf20394d5cb3a"} Jan 22 07:06:54 crc kubenswrapper[4814]: I0122 07:06:54.727482 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xlf6r" event={"ID":"a9277783-76c6-4e8d-bb11-e8e89789c251","Type":"ContainerStarted","Data":"cbec080f8091f9e85bdb47b208c33608d4fcf63bae88e22fa450bc514fb47463"} Jan 22 07:06:55 crc kubenswrapper[4814]: I0122 07:06:55.739650 4814 generic.go:334] "Generic (PLEG): container finished" podID="a9277783-76c6-4e8d-bb11-e8e89789c251" containerID="6826f25f8ecd72c1d303a391fee786ddb4696a911b5a0150edeaf20394d5cb3a" exitCode=0 Jan 22 07:06:55 crc kubenswrapper[4814]: I0122 07:06:55.739709 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xlf6r" event={"ID":"a9277783-76c6-4e8d-bb11-e8e89789c251","Type":"ContainerDied","Data":"6826f25f8ecd72c1d303a391fee786ddb4696a911b5a0150edeaf20394d5cb3a"} Jan 22 07:06:58 crc kubenswrapper[4814]: I0122 07:06:58.772424 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xlf6r" event={"ID":"a9277783-76c6-4e8d-bb11-e8e89789c251","Type":"ContainerStarted","Data":"c63259207ed003c41db6281c3416b0a89e7dedf708ccb57103602efa261fd006"} Jan 22 07:07:00 crc kubenswrapper[4814]: I0122 07:07:00.345531 4814 scope.go:117] "RemoveContainer" containerID="cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa" Jan 22 07:07:00 crc kubenswrapper[4814]: E0122 07:07:00.346367 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 07:07:05 crc kubenswrapper[4814]: I0122 07:07:05.869522 4814 generic.go:334] "Generic (PLEG): container finished" podID="a9277783-76c6-4e8d-bb11-e8e89789c251" containerID="c63259207ed003c41db6281c3416b0a89e7dedf708ccb57103602efa261fd006" exitCode=0 Jan 22 07:07:05 crc kubenswrapper[4814]: I0122 07:07:05.869731 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xlf6r" event={"ID":"a9277783-76c6-4e8d-bb11-e8e89789c251","Type":"ContainerDied","Data":"c63259207ed003c41db6281c3416b0a89e7dedf708ccb57103602efa261fd006"} Jan 22 07:07:08 crc kubenswrapper[4814]: I0122 07:07:08.910285 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-xlf6r" event={"ID":"a9277783-76c6-4e8d-bb11-e8e89789c251","Type":"ContainerStarted","Data":"536a0e33629a9e3e0c798a1deae9ebbeab16751f5a76f5666ba63d67ca55660f"} Jan 22 07:07:08 crc kubenswrapper[4814]: I0122 07:07:08.938378 4814 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xlf6r" podStartSLOduration=4.170955661 podStartE2EDuration="15.93833934s" podCreationTimestamp="2026-01-22 07:06:53 +0000 UTC" firstStartedPulling="2026-01-22 07:06:55.742509914 +0000 UTC m=+6501.825998149" lastFinishedPulling="2026-01-22 07:07:07.509893573 +0000 UTC m=+6513.593381828" observedRunningTime="2026-01-22 07:07:08.929035209 +0000 UTC m=+6515.012523424" watchObservedRunningTime="2026-01-22 07:07:08.93833934 +0000 UTC m=+6515.021827585" Jan 22 07:07:11 crc kubenswrapper[4814]: I0122 07:07:11.343577 4814 scope.go:117] "RemoveContainer" containerID="cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa" Jan 22 07:07:11 crc kubenswrapper[4814]: E0122 07:07:11.345299 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 07:07:11 crc kubenswrapper[4814]: I0122 07:07:11.949834 4814 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-2q58m" podUID="ce290094-b837-4566-baf6-c829a8dff794" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.48:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 22 07:07:13 crc kubenswrapper[4814]: I0122 07:07:13.588718 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:07:13 crc kubenswrapper[4814]: I0122 07:07:13.589875 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:07:14 crc kubenswrapper[4814]: I0122 07:07:14.639141 4814 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xlf6r" podUID="a9277783-76c6-4e8d-bb11-e8e89789c251" containerName="registry-server" probeResult="failure" output=< Jan 22 07:07:14 crc kubenswrapper[4814]: timeout: failed to connect service ":50051" within 1s Jan 22 07:07:14 crc kubenswrapper[4814]: > Jan 22 07:07:23 crc kubenswrapper[4814]: I0122 07:07:23.665684 4814 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:07:23 crc kubenswrapper[4814]: I0122 07:07:23.743430 4814 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:07:24 crc kubenswrapper[4814]: I0122 07:07:24.453353 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xlf6r"] Jan 22 07:07:25 crc kubenswrapper[4814]: I0122 07:07:25.053879 4814 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xlf6r" podUID="a9277783-76c6-4e8d-bb11-e8e89789c251" containerName="registry-server" 
containerID="cri-o://536a0e33629a9e3e0c798a1deae9ebbeab16751f5a76f5666ba63d67ca55660f" gracePeriod=2 Jan 22 07:07:25 crc kubenswrapper[4814]: I0122 07:07:25.344277 4814 scope.go:117] "RemoveContainer" containerID="cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa" Jan 22 07:07:25 crc kubenswrapper[4814]: E0122 07:07:25.344548 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 07:07:26 crc kubenswrapper[4814]: I0122 07:07:26.067547 4814 generic.go:334] "Generic (PLEG): container finished" podID="a9277783-76c6-4e8d-bb11-e8e89789c251" containerID="536a0e33629a9e3e0c798a1deae9ebbeab16751f5a76f5666ba63d67ca55660f" exitCode=0 Jan 22 07:07:26 crc kubenswrapper[4814]: I0122 07:07:26.067853 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xlf6r" event={"ID":"a9277783-76c6-4e8d-bb11-e8e89789c251","Type":"ContainerDied","Data":"536a0e33629a9e3e0c798a1deae9ebbeab16751f5a76f5666ba63d67ca55660f"} Jan 22 07:07:26 crc kubenswrapper[4814]: I0122 07:07:26.214624 4814 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:07:26 crc kubenswrapper[4814]: I0122 07:07:26.287723 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-954ml\" (UniqueName: \"kubernetes.io/projected/a9277783-76c6-4e8d-bb11-e8e89789c251-kube-api-access-954ml\") pod \"a9277783-76c6-4e8d-bb11-e8e89789c251\" (UID: \"a9277783-76c6-4e8d-bb11-e8e89789c251\") " Jan 22 07:07:26 crc kubenswrapper[4814]: I0122 07:07:26.287802 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9277783-76c6-4e8d-bb11-e8e89789c251-utilities\") pod \"a9277783-76c6-4e8d-bb11-e8e89789c251\" (UID: \"a9277783-76c6-4e8d-bb11-e8e89789c251\") " Jan 22 07:07:26 crc kubenswrapper[4814]: I0122 07:07:26.287981 4814 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9277783-76c6-4e8d-bb11-e8e89789c251-catalog-content\") pod \"a9277783-76c6-4e8d-bb11-e8e89789c251\" (UID: \"a9277783-76c6-4e8d-bb11-e8e89789c251\") " Jan 22 07:07:26 crc kubenswrapper[4814]: I0122 07:07:26.291021 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9277783-76c6-4e8d-bb11-e8e89789c251-utilities" (OuterVolumeSpecName: "utilities") pod "a9277783-76c6-4e8d-bb11-e8e89789c251" (UID: "a9277783-76c6-4e8d-bb11-e8e89789c251"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:07:26 crc kubenswrapper[4814]: I0122 07:07:26.298840 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9277783-76c6-4e8d-bb11-e8e89789c251-kube-api-access-954ml" (OuterVolumeSpecName: "kube-api-access-954ml") pod "a9277783-76c6-4e8d-bb11-e8e89789c251" (UID: "a9277783-76c6-4e8d-bb11-e8e89789c251"). InnerVolumeSpecName "kube-api-access-954ml". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 22 07:07:26 crc kubenswrapper[4814]: I0122 07:07:26.390579 4814 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-954ml\" (UniqueName: \"kubernetes.io/projected/a9277783-76c6-4e8d-bb11-e8e89789c251-kube-api-access-954ml\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:26 crc kubenswrapper[4814]: I0122 07:07:26.390935 4814 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9277783-76c6-4e8d-bb11-e8e89789c251-utilities\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:26 crc kubenswrapper[4814]: I0122 07:07:26.419695 4814 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9277783-76c6-4e8d-bb11-e8e89789c251-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a9277783-76c6-4e8d-bb11-e8e89789c251" (UID: "a9277783-76c6-4e8d-bb11-e8e89789c251"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 22 07:07:26 crc kubenswrapper[4814]: I0122 07:07:26.492899 4814 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9277783-76c6-4e8d-bb11-e8e89789c251-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 22 07:07:27 crc kubenswrapper[4814]: I0122 07:07:27.085507 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xlf6r" event={"ID":"a9277783-76c6-4e8d-bb11-e8e89789c251","Type":"ContainerDied","Data":"cbec080f8091f9e85bdb47b208c33608d4fcf63bae88e22fa450bc514fb47463"} Jan 22 07:07:27 crc kubenswrapper[4814]: I0122 07:07:27.087062 4814 scope.go:117] "RemoveContainer" containerID="536a0e33629a9e3e0c798a1deae9ebbeab16751f5a76f5666ba63d67ca55660f" Jan 22 07:07:27 crc kubenswrapper[4814]: I0122 07:07:27.085646 4814 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xlf6r" Jan 22 07:07:27 crc kubenswrapper[4814]: I0122 07:07:27.125243 4814 scope.go:117] "RemoveContainer" containerID="c63259207ed003c41db6281c3416b0a89e7dedf708ccb57103602efa261fd006" Jan 22 07:07:27 crc kubenswrapper[4814]: I0122 07:07:27.127061 4814 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xlf6r"] Jan 22 07:07:27 crc kubenswrapper[4814]: I0122 07:07:27.138990 4814 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xlf6r"] Jan 22 07:07:27 crc kubenswrapper[4814]: I0122 07:07:27.160726 4814 scope.go:117] "RemoveContainer" containerID="6826f25f8ecd72c1d303a391fee786ddb4696a911b5a0150edeaf20394d5cb3a" Jan 22 07:07:28 crc kubenswrapper[4814]: I0122 07:07:28.360652 4814 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9277783-76c6-4e8d-bb11-e8e89789c251" path="/var/lib/kubelet/pods/a9277783-76c6-4e8d-bb11-e8e89789c251/volumes" Jan 22 07:07:39 crc kubenswrapper[4814]: I0122 07:07:39.343388 4814 scope.go:117] "RemoveContainer" containerID="cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa" Jan 22 07:07:39 crc kubenswrapper[4814]: E0122 07:07:39.344038 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.143109 4814 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4g4t7"] Jan 22 07:07:48 crc kubenswrapper[4814]: E0122 07:07:48.145149 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9277783-76c6-4e8d-bb11-e8e89789c251" containerName="registry-server" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.145242 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9277783-76c6-4e8d-bb11-e8e89789c251" containerName="registry-server" Jan 22 07:07:48 crc kubenswrapper[4814]: E0122 07:07:48.145334 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9277783-76c6-4e8d-bb11-e8e89789c251" containerName="extract-utilities" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.145411 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9277783-76c6-4e8d-bb11-e8e89789c251" containerName="extract-utilities" Jan 22 07:07:48 crc kubenswrapper[4814]: E0122 07:07:48.145483 4814 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9277783-76c6-4e8d-bb11-e8e89789c251" containerName="extract-content" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.145541 4814 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9277783-76c6-4e8d-bb11-e8e89789c251" containerName="extract-content" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.145881 4814 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9277783-76c6-4e8d-bb11-e8e89789c251" containerName="registry-server" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.147694 4814 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4g4t7" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.184971 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4g4t7"] Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.283838 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7v7w6\" (UniqueName: \"kubernetes.io/projected/f71101e2-0a56-4932-aba8-e65693fc7157-kube-api-access-7v7w6\") pod \"redhat-marketplace-4g4t7\" (UID: \"f71101e2-0a56-4932-aba8-e65693fc7157\") " pod="openshift-marketplace/redhat-marketplace-4g4t7" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.283922 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f71101e2-0a56-4932-aba8-e65693fc7157-catalog-content\") pod \"redhat-marketplace-4g4t7\" (UID: \"f71101e2-0a56-4932-aba8-e65693fc7157\") " pod="openshift-marketplace/redhat-marketplace-4g4t7" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.283954 4814 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f71101e2-0a56-4932-aba8-e65693fc7157-utilities\") pod \"redhat-marketplace-4g4t7\" (UID: \"f71101e2-0a56-4932-aba8-e65693fc7157\") " pod="openshift-marketplace/redhat-marketplace-4g4t7" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.386204 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7v7w6\" (UniqueName: \"kubernetes.io/projected/f71101e2-0a56-4932-aba8-e65693fc7157-kube-api-access-7v7w6\") pod \"redhat-marketplace-4g4t7\" (UID: \"f71101e2-0a56-4932-aba8-e65693fc7157\") " pod="openshift-marketplace/redhat-marketplace-4g4t7" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.387620 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f71101e2-0a56-4932-aba8-e65693fc7157-catalog-content\") pod \"redhat-marketplace-4g4t7\" (UID: \"f71101e2-0a56-4932-aba8-e65693fc7157\") " pod="openshift-marketplace/redhat-marketplace-4g4t7" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.389442 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f71101e2-0a56-4932-aba8-e65693fc7157-catalog-content\") pod \"redhat-marketplace-4g4t7\" (UID: \"f71101e2-0a56-4932-aba8-e65693fc7157\") " pod="openshift-marketplace/redhat-marketplace-4g4t7" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.389622 4814 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f71101e2-0a56-4932-aba8-e65693fc7157-utilities\") pod \"redhat-marketplace-4g4t7\" (UID: \"f71101e2-0a56-4932-aba8-e65693fc7157\") " pod="openshift-marketplace/redhat-marketplace-4g4t7" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.390261 4814 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f71101e2-0a56-4932-aba8-e65693fc7157-utilities\") pod \"redhat-marketplace-4g4t7\" (UID: \"f71101e2-0a56-4932-aba8-e65693fc7157\") " pod="openshift-marketplace/redhat-marketplace-4g4t7" Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.406178 4814 operation_generator.go:637] "MountVolume.SetUp 
Jan 22 07:07:48 crc kubenswrapper[4814]: I0122 07:07:48.468765 4814 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4g4t7"
Jan 22 07:07:49 crc kubenswrapper[4814]: I0122 07:07:49.132672 4814 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4g4t7"]
Jan 22 07:07:49 crc kubenswrapper[4814]: W0122 07:07:49.146116 4814 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf71101e2_0a56_4932_aba8_e65693fc7157.slice/crio-398e8b895289be391b4abdef87469870cac85676a4aa7194c27760a0b8a68d9f WatchSource:0}: Error finding container 398e8b895289be391b4abdef87469870cac85676a4aa7194c27760a0b8a68d9f: Status 404 returned error can't find the container with id 398e8b895289be391b4abdef87469870cac85676a4aa7194c27760a0b8a68d9f
Jan 22 07:07:49 crc kubenswrapper[4814]: I0122 07:07:49.403284 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4g4t7" event={"ID":"f71101e2-0a56-4932-aba8-e65693fc7157","Type":"ContainerStarted","Data":"398e8b895289be391b4abdef87469870cac85676a4aa7194c27760a0b8a68d9f"}
Jan 22 07:07:50 crc kubenswrapper[4814]: I0122 07:07:50.346922 4814 scope.go:117] "RemoveContainer" containerID="cf15340696864686bfc48106b44c4cfa1e8ac7aed386c00e1bd3815f6bc9dbaa"
Jan 22 07:07:50 crc kubenswrapper[4814]: E0122 07:07:50.347769 4814 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-f57bg_openshift-machine-config-operator(362cbfbe-caa3-40b7-906c-80c378b01e0c)\"" pod="openshift-machine-config-operator/machine-config-daemon-f57bg" podUID="362cbfbe-caa3-40b7-906c-80c378b01e0c"
Jan 22 07:07:50 crc kubenswrapper[4814]: I0122 07:07:50.414040 4814 generic.go:334] "Generic (PLEG): container finished" podID="f71101e2-0a56-4932-aba8-e65693fc7157" containerID="f53b7c289bf7e7b19b0b35485c164bd88a248b5ec6b6b12f6ed40fd034aa4b5b" exitCode=0
Jan 22 07:07:50 crc kubenswrapper[4814]: I0122 07:07:50.414084 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4g4t7" event={"ID":"f71101e2-0a56-4932-aba8-e65693fc7157","Type":"ContainerDied","Data":"f53b7c289bf7e7b19b0b35485c164bd88a248b5ec6b6b12f6ed40fd034aa4b5b"}
Jan 22 07:07:53 crc kubenswrapper[4814]: I0122 07:07:53.439694 4814 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4g4t7" event={"ID":"f71101e2-0a56-4932-aba8-e65693fc7157","Type":"ContainerStarted","Data":"49e74c275b42d11497c5a4be1cf75c0fc9c119e80c24a3f2717cc5fc0c2fb071"}